Java Code Examples for org.apache.hadoop.fs.StorageType#SSD

The following examples show how to use org.apache.hadoop.fs.StorageType#SSD. The examples are drawn from open-source projects; you can go to the original project or source file by following the link above each example.
Example 1
Source File: HDFSBlocksDistribution.java    From hbase with Apache License 2.0
/**
 * Add some weight to a list of hosts and update the unique block weight.
 * @param hosts the list of hosts
 * @param weight the weight to add
 * @param storageTypes the storage type of each replica, aligned with {@code hosts}
 */
public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
  if (hosts == null || hosts.length == 0) {
    // erroneous data
    return;
  }

  addUniqueWeight(weight);
  if (storageTypes != null && storageTypes.length == hosts.length) {
    for (int i = 0; i < hosts.length; i++) {
      long weightForSsd = 0;
      if (storageTypes[i] == StorageType.SSD) {
        weightForSsd = weight;
      }
      addHostAndBlockWeight(hosts[i], weight, weightForSsd);
    }
  } else {
    for (String hostname : hosts) {
      addHostAndBlockWeight(hostname, weight, 0);
    }
  }
}
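
Below is a hedged usage sketch (not part of the source file above) showing how this method rolls per-host and SSD-aware weights into an HDFSBlocksDistribution. It assumes the no-argument constructor and the getWeight/getUniqueBlocksTotalWeight accessors; SSD-specific accessors vary between HBase versions, so only the aggregate values are read back here.

// Hedged usage sketch: one 128 MB block replicated on three hosts, with only
// "h2" holding its replica on SSD.
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class BlocksDistributionSketch {
  public static void main(String[] args) {
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    String[] hosts = {"h1", "h2", "h3"};
    StorageType[] types = {StorageType.DISK, StorageType.SSD, StorageType.DISK};
    long blockWeight = 128L * 1024 * 1024;

    dist.addHostsAndBlockWeight(hosts, blockWeight, types);

    // Every host carries the full block weight; the unique weight counts the
    // block only once, regardless of how many replicas it has.
    System.out.println(dist.getWeight("h2"));               // 134217728
    System.out.println(dist.getUniqueBlocksTotalWeight());  // 134217728
  }
}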
 
Example 2
Source File: TestPBHelper.java    From big-c with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Example 3
Source File: TestPBHelper.java    From hadoop with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Example 4
Source File: StorageLocationReport.java    From hadoop-ozone with Apache License 2.0
private static StorageType getStorageType(StorageTypeProto proto) throws
    IOException {
  StorageType storageType;
  switch (proto) {
  case SSD:
    storageType = StorageType.SSD;
    break;
  case DISK:
    storageType = StorageType.DISK;
    break;
  case ARCHIVE:
    storageType = StorageType.ARCHIVE;
    break;
  case PROVIDED:
    storageType = StorageType.PROVIDED;
    break;
  case RAM_DISK:
    storageType = StorageType.RAM_DISK;
    break;
  default:
    throw new IOException("Illegal Storage Type specified");
  }
  return storageType;
}
 
Example 5
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static StorageType convertStorageType(StorageTypeProto type) {
  switch(type) {
    case DISK:
      return StorageType.DISK;
    case SSD:
      return StorageType.SSD;
    case ARCHIVE:
      return StorageType.ARCHIVE;
    case RAM_DISK:
      return StorageType.RAM_DISK;
    default:
      throw new IllegalStateException(
          "BUG: StorageTypeProto not found, type=" + type);
  }
}
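
For a quick round-trip check, the hedged sketch below assumes the companion overload PBHelper.convertStorageType(StorageType), which converts in the opposite direction in the same class; method locations differ between Hadoop releases, so treat the imports as illustrative.

// Hedged round-trip sketch: StorageType <-> StorageTypeProto via PBHelper.
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class StorageTypeRoundTrip {
  public static void main(String[] args) {
    // Assumed overload: PBHelper.convertStorageType(StorageType) -> StorageTypeProto.
    StorageTypeProto proto = PBHelper.convertStorageType(StorageType.SSD);
    StorageType back = PBHelper.convertStorageType(proto);
    System.out.println(proto + " -> " + back);  // expected: SSD -> SSD
  }
}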
 
Example 6
Source File: TestPBHelper.java    From big-c with Apache License 2.0
@Test
public void TestConvertDatanodeStorage() {
  DatanodeStorage dns1 = new DatanodeStorage(
      "id1", DatanodeStorage.State.NORMAL, StorageType.SSD);

  DatanodeStorageProto proto = PBHelper.convert(dns1);
  DatanodeStorage dns2 = PBHelper.convert(proto);
  compare(dns1, dns2);
}
 
Example 7
Source File: TestPBHelper.java    From hadoop with Apache License 2.0
@Test
public void TestConvertDatanodeStorage() {
  DatanodeStorage dns1 = new DatanodeStorage(
      "id1", DatanodeStorage.State.NORMAL, StorageType.SSD);

  DatanodeStorageProto proto = PBHelper.convert(dns1);
  DatanodeStorage dns2 = PBHelper.convert(proto);
  compare(dns1, dns2);
}
 
Example 8
Source File: PBHelper.java    From big-c with Apache License 2.0
public static StorageType convertStorageType(StorageTypeProto type) {
  switch(type) {
    case DISK:
      return StorageType.DISK;
    case SSD:
      return StorageType.SSD;
    case ARCHIVE:
      return StorageType.ARCHIVE;
    case RAM_DISK:
      return StorageType.RAM_DISK;
    default:
      throw new IllegalStateException(
          "BUG: StorageTypeProto not found, type=" + type);
  }
}
 
Example 9
Source File: BlockStoragePolicySuite.java    From big-c with Apache License 2.0
@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
  final BlockStoragePolicy[] policies =
      new BlockStoragePolicy[1 << ID_BIT_LENGTH];
  final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
  policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, 
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      true);    // Cannot be changed on regular files, but inherited.
  final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
  policies[allssdId] = new BlockStoragePolicy(allssdId,
      HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK});
  final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
  policies[onessdId] = new BlockStoragePolicy(onessdId,
      HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK});
  final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
  policies[hotId] = new BlockStoragePolicy(hotId,
      HdfsConstants.HOT_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
      new StorageType[]{StorageType.ARCHIVE});
  final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
  policies[warmId] = new BlockStoragePolicy(warmId,
      HdfsConstants.WARM_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
  final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
  policies[coldId] = new BlockStoragePolicy(coldId,
      HdfsConstants.COLD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
      StorageType.EMPTY_ARRAY);
  return new BlockStoragePolicySuite(hotId, policies);
}
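
To see how a policy from this suite translates into concrete storage types, the sketch below queries the ONE_SSD policy for a 3-replica block. It relies on BlockStoragePolicySuite#getPolicy and BlockStoragePolicy#chooseStorageTypes as exposed in this line of releases; package locations may differ in later versions.

// Sketch: which storage types does the default ONE_SSD policy choose for
// a block with replication factor 3?
import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class OneSsdPolicySketch {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy oneSsd =
        suite.getPolicy(HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // ONE_SSD places one replica on SSD and the rest on DISK, so for
    // replication 3 this is expected to print [SSD, DISK, DISK].
    List<StorageType> chosen = oneSsd.chooseStorageTypes((short) 3);
    System.out.println(chosen);
  }
}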
 
Example 10
Source File: TestDatanodeManager.java    From big-c with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Example 11
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test
public void testQuotaByStorageTypePersistenceInFsImage() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);

  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Restart the namenode with checkpoint to make sure fsImage is correct
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  dfs.saveNamespace();
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());

  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t: StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }

  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
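
The same quota can be set and read back from the client side; the hedged sketch below assumes an already-connected DistributedFileSystem (named fs here) and uses ContentSummary's per-type accessors rather than going through the INode as the test does.

// Hedged client-side sketch: set an SSD quota on a directory governed by the
// ONE_SSD policy, then read the quota and current consumption back.
import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SsdQuotaSketch {
  // fs is assumed to be an already-connected DistributedFileSystem handle.
  static void configureSsdQuota(DistributedFileSystem fs, Path dir) throws IOException {
    fs.mkdirs(dir);
    fs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    fs.setQuotaByStorageType(dir, StorageType.SSD, 4L * 128 * 1024 * 1024);

    ContentSummary cs = fs.getContentSummary(dir);
    System.out.println("SSD quota:    " + cs.getTypeQuota(StorageType.SSD));
    System.out.println("SSD consumed: " + cs.getTypeConsumed(StorageType.SSD));
  }
}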
 
Example 12
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test
public void testQuotaByStorageTypePersistenceInEditLog() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);

  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Restart namenode to make sure the editlog is correct
  cluster.restartNameNode(true);

  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  // Verify quota is still set
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());

  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t: StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }

  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
 
Example 13
Source File: TestBlockStoragePolicy.java    From big-c with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Example 14
Source File: TestBlockStoragePolicy.java    From big-c with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 
Example 15
Source File: TestDatanodeManager.java    From hadoop with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Example 16
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test
public void testQuotaByStorageTypePersistenceInFsImage() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);

  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Restart the namenode with checkpoint to make sure fsImage is correct
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  dfs.saveNamespace();
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());

  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t: StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }

  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
 
Example 17
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test
public void testQuotaByStorageTypePersistenceInEditLog() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);

  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Restart namenode to make sure the editlog is correct
  cluster.restartNameNode(true);

  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  // Verify quota is still set
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());

  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t: StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }

  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
 
Example 18
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Example 19
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 
Example 20
Source File: BlockStoragePolicySuite.java    From hadoop with Apache License 2.0
@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
  final BlockStoragePolicy[] policies =
      new BlockStoragePolicy[1 << ID_BIT_LENGTH];
  final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
  policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, 
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      true);    // Cannot be changed on regular files, but inherited.
  final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
  policies[allssdId] = new BlockStoragePolicy(allssdId,
      HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK});
  final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
  policies[onessdId] = new BlockStoragePolicy(onessdId,
      HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK});
  final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
  policies[hotId] = new BlockStoragePolicy(hotId,
      HdfsConstants.HOT_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
      new StorageType[]{StorageType.ARCHIVE});
  final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
  policies[warmId] = new BlockStoragePolicy(warmId,
      HdfsConstants.WARM_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
  final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
  policies[coldId] = new BlockStoragePolicy(coldId,
      HdfsConstants.COLD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
      StorageType.EMPTY_ARRAY);
  return new BlockStoragePolicySuite(hotId, policies);
}