Java Code Examples for org.apache.hadoop.net.NetworkTopology#add()
The following examples show how to use org.apache.hadoop.net.NetworkTopology#add().
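Before the project examples, here is a minimal, self-contained sketch of the add() contract: each node is added as a leaf under the rack given by its network location, after which the topology can answer rack-membership and distance queries. This sketch is not taken from any of the examples below; the host names, rack paths, and use of NodeBase (rather than DatanodeDescriptor) are illustrative, assuming only the public NetworkTopology API from hadoop-common.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class NetworkTopologyAddSketch {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();

    // NodeBase(name, location): the location is the rack path the node is added under.
    Node host1 = new NodeBase("host1", "/d1/r1");
    Node host2 = new NodeBase("host2", "/d1/r1");
    Node host3 = new NodeBase("host3", "/d1/r2");

    // add() registers each node as a leaf of its rack in the topology tree.
    topology.add(host1);
    topology.add(host2);
    topology.add(host3);

    System.out.println("leaves: " + topology.getNumOfLeaves());            // 3
    System.out.println("racks:  " + topology.getNumOfRacks());             // 2
    System.out.println("same rack (host1, host2): "
        + topology.isOnSameRack(host1, host2));                            // true
    // Expected 4: two hops up to the common ancestor /d1, two hops down.
    System.out.println("distance(host1, host3): "
        + topology.getDistance(host1, host3));
  }
}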
Example 1
Source File: TestConfigurableBlockPlacement.java From RDFS with Apache License 2.0
private VerifiablePolicy initTest() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();
  // Register every test datanode with the topology before initializing the policy.
  for (DatanodeDescriptor d : dataNodes) {
    clusterMap.add(d);
  }
  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);
  policy.initialize(conf, stats, clusterMap, hostsReader,
      dnsToSwitchMapping, null);
  return policy;
}
Example 2
Source File: BlockReconstructor.java From RDFS with Apache License 2.0
/**
 * Return the distances among an array of nodes.
 * @param nodes the nodes array
 * @return a distance matrix; the distance between nodes[i] and nodes[j]
 *         is stored in the element indexed by [i][j]
 */
static public int[][] getRealDistances(DatanodeInfo[] nodes) {
  int[][] distances = new int[nodes.length][nodes.length];
  NetworkTopology netTopology = new NetworkTopology();
  for (int i = 0; i < distances.length; i++)
    netTopology.add(nodes[i]);
  for (int i = 0; i < distances.length; i++) {
    for (int j = 0; j < i; j++) {
      distances[i][j] = netTopology.getDistance(nodes[i], nodes[j]);
      distances[j][i] = distances[i][j];
    }
    distances[i][i] = 0;
  }
  return distances;
}
Example 3
Source File: TestBlockManager.java From hadoop with Apache License 2.0
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L);
    dn.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dn),
        0L, 0L, 0, 0, null);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
Example 4
Source File: TestBlockManager.java From big-c with Apache License 2.0
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L);
    dn.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dn),
        0L, 0L, 0, 0, null);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
Example 5
Source File: TestBlockStoragePolicy.java From hadoop with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE};
  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);

  targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
Example 6
Source File: TestBlockStoragePolicy.java From hadoop with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final DatanodeStorageInfo[] diskStorages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes =
      DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i], new DatanodeStorage(
        "ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
Example 7
Source File: TestFsck.java From hadoop with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;

  String[] racks = {"/rack1", "/rack1"};
  String[] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    // Startup a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);

    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;

    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
        NUM_DN, remoteAddress);

    // Run the fsck and check the Result
    final HdfsFileStatus file =
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 8
Source File: TestBlockStoragePolicy.java From big-c with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE};
  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);

  targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
Example 9
Source File: TestBlockStoragePolicy.java From big-c with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE},
      new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final DatanodeStorageInfo[] diskStorages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes =
      DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i], new DatanodeStorage(
        "ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
Example 10
Source File: TestFsck.java From big-c with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;

  String[] racks = {"/rack1", "/rack1"};
  String[] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    // Startup a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);

    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;

    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
        NUM_DN, remoteAddress);

    // Run the fsck and check the Result
    final HdfsFileStatus file =
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 11
Source File: TestConfigurableBlockPlacement.java From RDFS with Apache License 2.0
public void testChooseTarget() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();
  for (DatanodeDescriptor d : dataNodes) {
    clusterMap.add(d);
  }
  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);
  policy.initialize(conf, stats, clusterMap, hostsReader,
      dnsToSwitchMapping, null);

  HashMap<Node, Node> emptyMap = new HashMap<Node, Node>();
  List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>();
  DatanodeDescriptor writer = dataNodes[0];

  // Replication Factor 2
  DatanodeDescriptor fwriter =
      policy.chooseTarget(2, writer, emptyMap, 512, 4, results, true);
  assertEquals(writer.getNetworkLocation(), fwriter.getNetworkLocation());
  assertEquals(writer.getNetworkLocation(),
      results.get(0).getNetworkLocation());
  assertEquals(results.get(0).getNetworkLocation(),
      results.get(1).getNetworkLocation());
  assertFalse(results.get(0).getHost().equals(
      results.get(1).getHost()));

  results.clear();
  emptyMap.clear();
  writer = dataNodes[0];

  // Replication Factor 3
  fwriter = policy.chooseTarget(3, writer, emptyMap, 512, 4, results, true);
  assertEquals(writer.getNetworkLocation(), fwriter.getNetworkLocation());
  assertEquals(writer.getNetworkLocation(),
      results.get(0).getNetworkLocation());
  assertEquals(results.get(1).getNetworkLocation(),
      results.get(2).getNetworkLocation());
  assertFalse(results.get(0).getNetworkLocation().equals(
      results.get(1).getNetworkLocation()));
}
Example 12
Source File: TestConfigurableBlockPlacement.java From RDFS with Apache License 2.0
public void testFindBest() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();
  for (DatanodeDescriptor d : dataNodes) {
    clusterMap.add(d);
  }
  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);
  policy.initialize(conf, stats, clusterMap, hostsReader,
      dnsToSwitchMapping, null);

  DatanodeDescriptor[] r;
  r = policy.findBest(Arrays.asList(
      dataNodes[2], dataNodes[9], dataNodes[10], dataNodes[11],
      dataNodes[12], dataNodes[8], dataNodes[7]));
  assertEquals(dataNodes[2], r[0]);
  assertEquals(dataNodes[8], r[1]);
  assertEquals(dataNodes[7], r[2]);

  conf.setInt("dfs.replication.rackwindow", 1);
  conf.setInt("dfs.replication.machineWindow", 2);
  policy.initialize(conf, stats, clusterMap, hostsReader,
      dnsToSwitchMapping, null);

  r = policy.findBest(Arrays.asList(
      dataNodes[2], dataNodes[9], dataNodes[11]));
  assertEquals(dataNodes[2], r[0]);
  assertNull(r[1]);
  assertNull(r[2]);

  r = policy.findBest(Arrays.asList(
      dataNodes[2], dataNodes[6], dataNodes[9], dataNodes[12]));
  assertNull(r[0]);
  assertEquals(dataNodes[9], r[1]);
  assertEquals(dataNodes[12], r[2]);

  r = policy.findBest(Arrays.asList(
      dataNodes[2], dataNodes[4], dataNodes[9], dataNodes[12]));
  assertEquals(dataNodes[2], r[0]);
  assertEquals(dataNodes[4], r[1]);
  assertNull(r[2]);
}