Java Code Examples for org.apache.hadoop.fs.StorageType#DISK

The following examples show how to use org.apache.hadoop.fs.StorageType#DISK. Each example is taken from an open source project; the source file, project, and license are noted above the code.
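StorageType is the enum HDFS uses to label a storage medium; DISK is the default medium and the one these examples exercise. As a quick orientation, a minimal sketch of the plain enum API (the DEFAULT alias and isTransient are per the Hadoop 2.x enum; exact helpers vary by version):

import org.apache.hadoop.fs.StorageType;

public class StorageTypeDemo {
  public static void main(String[] args) {
    // DISK is an ordinary enum constant, so the standard enum API applies.
    StorageType t = StorageType.valueOf("DISK");
    System.out.println(t == StorageType.DISK);                    // true
    System.out.println(StorageType.DEFAULT == StorageType.DISK);  // true: DISK is the default
    for (StorageType s : StorageType.values()) {
      // RAM_DISK is the only transient (memory-backed) type.
      System.out.println(s + " transient=" + s.isTransient());
    }
  }
}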
Example 1
Source File: StorageLocationReport.java    From hadoop-ozone with Apache License 2.0
private static StorageType getStorageType(StorageTypeProto proto) throws
    IOException {
  StorageType storageType;
  switch (proto) {
  case SSD:
    storageType = StorageType.SSD;
    break;
  case DISK:
    storageType = StorageType.DISK;
    break;
  case ARCHIVE:
    storageType = StorageType.ARCHIVE;
    break;
  case PROVIDED:
    storageType = StorageType.PROVIDED;
    break;
  case RAM_DISK:
    storageType = StorageType.RAM_DISK;
    break;
  default:
    throw new IOException("Illegal Storage Type specified");
  }
  return storageType;
}
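The reverse direction is a mirrored switch. A minimal sketch (getStorageTypeProto is an illustrative name, not necessarily the exact hadoop-ozone helper):

private static StorageTypeProto getStorageTypeProto(StorageType type)
    throws IOException {
  switch (type) {
  case SSD:
    return StorageTypeProto.SSD;
  case DISK:
    return StorageTypeProto.DISK;
  case ARCHIVE:
    return StorageTypeProto.ARCHIVE;
  case PROVIDED:
    return StorageTypeProto.PROVIDED;
  case RAM_DISK:
    return StorageTypeProto.RAM_DISK;
  default:
    throw new IOException("Illegal Storage Type specified: " + type);
  }
}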
 
Example 2
Source File: TestStorageMover.java    From big-c with Apache License 2.0
private static StorageType[][] genStorageTypes(int numDataNodes,
    int numAllDisk, int numAllArchive, int numRamDisk) {
  Preconditions.checkArgument(
    (numAllDisk + numAllArchive + numRamDisk) <= numDataNodes);

  StorageType[][] types = new StorageType[numDataNodes][];
  int i = 0;
  for (; i < numRamDisk; i++) {
    types[i] = new StorageType[]{StorageType.RAM_DISK, StorageType.DISK};
  }
  for (; i < numRamDisk + numAllDisk; i++) {
    types[i] = new StorageType[]{StorageType.DISK, StorageType.DISK};
  }
  for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
    types[i] = new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE};
  }
  for (; i < types.length; i++) {
    types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
  }
  return types;
}
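For concreteness, here is the layout a hypothetical call produces; the counts fill rows in RAM_DISK, all-DISK, all-ARCHIVE order, and any remaining datanodes get the mixed DISK/ARCHIVE default:

// Hypothetical invocation (the method is private to the test class):
StorageType[][] types = genStorageTypes(5, 2, 1, 1);
// types[0] = {RAM_DISK, DISK}    -- the single RAM_DISK node comes first
// types[1] = {DISK, DISK}        -- then the two all-DISK nodes
// types[2] = {DISK, DISK}
// types[3] = {ARCHIVE, ARCHIVE}  -- then the one all-ARCHIVE node
// types[4] = {DISK, ARCHIVE}     -- the leftover node gets the mixed default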
 
Example 3
Source File: TestStorageMover.java    From big-c with Apache License 2.0
private Replication getOrVerifyReplication(Path file, Replication expected)
    throws IOException {
  final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
      file.toString(), 0).getLocatedBlocks();
  Assert.assertEquals(1, lbs.size());

  LocatedBlock lb = lbs.get(0);
  StringBuilder types = new StringBuilder(); 
  final Replication r = new Replication();
  for(StorageType t : lb.getStorageTypes()) {
    types.append(t).append(", ");
    if (t == StorageType.DISK) {
      r.disk++;
    } else if (t == StorageType.ARCHIVE) {
      r.archive++;
    } else {
      Assert.fail("Unexpected storage type " + t);
    }
  }

  if (expected != null) {
    final String s = "file = " + file + "\n  types = [" + types + "]";
    Assert.assertEquals(s, expected, r);
  }
  return r;
}
 
Example 4
Source File: TestPBHelper.java    From big-c with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
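In the test this block is round-tripped through the protobuf converters and the results compared. A minimal sketch of such a check (assuming the LocatedBlock convert overloads that PBHelper provides in this tree):

LocatedBlock lb = createLocatedBlock();
LocatedBlockProto proto = PBHelper.convert(lb);
LocatedBlock lb2 = PBHelper.convert(proto);
// The storage media must survive the round trip intact.
Assert.assertArrayEquals(lb.getStorageTypes(), lb2.getStorageTypes());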
 
Example 5
Source File: TestPBHelper.java    From big-c with Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
 
Example 6
Source File: PBHelper.java    From big-c with Apache License 2.0
public static StorageType convertStorageType(StorageTypeProto type) {
  switch(type) {
    case DISK:
      return StorageType.DISK;
    case SSD:
      return StorageType.SSD;
    case ARCHIVE:
      return StorageType.ARCHIVE;
    case RAM_DISK:
      return StorageType.RAM_DISK;
    default:
      throw new IllegalStateException(
          "BUG: StorageTypeProto not found, type=" + type);
  }
}
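PBHelper pairs this with the inverse mapping from StorageType to StorageTypeProto; a sketch in the same style (hedged against version differences):

public static StorageTypeProto convertStorageType(StorageType type) {
  switch(type) {
    case DISK:
      return StorageTypeProto.DISK;
    case SSD:
      return StorageTypeProto.SSD;
    case ARCHIVE:
      return StorageTypeProto.ARCHIVE;
    case RAM_DISK:
      return StorageTypeProto.RAM_DISK;
    default:
      throw new IllegalStateException(
          "BUG: StorageType not found, type=" + type);
  }
}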
 
Example 7
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
private static StorageType[][] genStorageTypes(int numDataNodes) {
  StorageType[][] types = new StorageType[numDataNodes][];
  for (int i = 0; i < types.length; i++) {
    types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
  }
  return types;
}
 
Example 8
Source File: TestDatanodeManager.java    From hadoop with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set the first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Example 9
Source File: BlockStoragePolicySuite.java    From big-c with Apache License 2.0
@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
  final BlockStoragePolicy[] policies =
      new BlockStoragePolicy[1 << ID_BIT_LENGTH];
  final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
  policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, 
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK},
      true);    // Cannot be changed on regular files, but inherited.
  final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
  policies[allssdId] = new BlockStoragePolicy(allssdId,
      HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD},
      new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK});
  final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
  policies[onessdId] = new BlockStoragePolicy(onessdId,
      HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK},
      new StorageType[]{StorageType.SSD, StorageType.DISK});
  final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
  policies[hotId] = new BlockStoragePolicy(hotId,
      HdfsConstants.HOT_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
      new StorageType[]{StorageType.ARCHIVE});
  final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
  policies[warmId] = new BlockStoragePolicy(warmId,
      HdfsConstants.WARM_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
  final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
  policies[coldId] = new BlockStoragePolicy(coldId,
      HdfsConstants.COLD_STORAGE_POLICY_NAME,
      new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
      StorageType.EMPTY_ARRAY);
  return new BlockStoragePolicySuite(hotId, policies);
}
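In each policy above, the first array lists the preferred medium per replica, the second the creation-time fallbacks, and the third the replication-time fallbacks. A minimal sketch of querying the suite (getPolicy and chooseStorageTypes as in the Hadoop 2.x API):

BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy warm = suite.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
// WARM keeps one replica on DISK and pushes the rest to ARCHIVE:
List<StorageType> chosen = warm.chooseStorageTypes((short) 3);
// chosen == [DISK, ARCHIVE, ARCHIVE]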
 
Example 10
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
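  // Only DISK and SSD storages exist, so the policy's third choice (ARCHIVE)
  // cannot be satisfied; two targets come back, SSD preferred over DISK.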
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Example 11
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 