Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#getNsServiceRpcUris()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#getNsServiceRpcUris(). The examples are drawn from open source projects; the originating source file and license are noted above each example.
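
Before the project examples, here is a minimal, self-contained sketch of the call itself. Given an HDFS Configuration, getNsServiceRpcUris(conf) returns one URI per configured nameservice, preferring the service RPC address (dfs.namenode.servicerpc-address) over the client RPC address and falling back to the default filesystem. The driver class ListNameNodeUris below is illustrative only and not part of Hadoop.

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative driver class (not part of Hadoop); shown only to
// demonstrate the call in isolation.
public class ListNameNodeUris {
  public static void main(String[] args) {
    // Picks up core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();

    // One URI per nameservice: the service RPC address if configured,
    // otherwise the client RPC address; for non-federated setups this
    // falls back to the default filesystem (fs.defaultFS).
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);

    for (URI nn : namenodes) {
      System.out.println("NameNode service URI: " + nn);
    }
  }
}
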
Example 1
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Source File: TestMover.java    From big-c with Apache License 2.0
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestBalancer.java    From hadoop with Apache License 2.0
private void runBalancer(Configuration conf,
   long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
   int excludedNodes) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = runBalancer(namenodes, p, conf);
  if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    return;
  } else {
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  }
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("  .");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
}
 
Example 4
Source File: TestBalancer.java    From big-c with Apache License 2.0
private void runBalancer(Configuration conf,
   long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
   int excludedNodes) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = runBalancer(namenodes, p, conf);
  if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    return;
  } else {
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  }
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("  .");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
}
 
Example 5
Source File: TestBalancerWithNodeGroup.java    From big-c with Apache License 2.0
private void runBalancer(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
  assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  waitForHeartBeat(totalUsedSpace, totalCapacity);
  LOG.info("Rebalancing with default factor.");
  waitForBalancer(totalUsedSpace, totalCapacity);
}
 
Example 6
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void runMover() throws Exception {
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }
  int result = Mover.run(nnMap, conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
}
 
Example 7
Source File: TestStorageMover.java    From big-c with Apache License 2.0
private void runMover() throws Exception {
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }
  int result = Mover.run(nnMap, conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
}
 
Example 8
Source File: TestBalancerWithNodeGroup.java    From hadoop with Apache License 2.0
private void runBalancer(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
  assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  waitForHeartBeat(totalUsedSpace, totalCapacity);
  LOG.info("Rebalancing with default factor.");
  waitForBalancer(totalUsedSpace, totalCapacity);
}
 
Example 9
Source File: TestBalancerWithNodeGroup.java    From hadoop with Apache License 2.0
private void runBalancerCanFinish(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
  Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() ||
      (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode()));
  waitForHeartBeat(totalUsedSpace, totalCapacity);
  LOG.info("Rebalancing with default factor.");
}
 
Example 10
Source File: TestMover.java    From hadoop with Apache License 2.0
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
 
Example 11
Source File: TestMover.java    From big-c with Apache License 2.0
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
 
Example 12
Source File: TestBalancer.java    From hadoop with Apache License 2.0
@Test(timeout=100000)
public void testUnknownDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  long[] distribution = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100};
  long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[] {RACK0, RACK1, RACK1};

  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate the total space that needs to be filled
  final long totalUsedSpace = sum(distribution);

  // fill the cluster
  ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
      (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short)(numDatanodes-1), distribution);

  // restart the cluster: do NOT format the cluster
  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .format(false)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    for(int i = 0; i < 3; i++) {
      cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
    }

    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK0}, null, new long[]{CAPACITY});
    cluster.triggerHeartbeats();

    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Set<String> datanodes = new HashSet<String>();
    datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        Balancer.Parameters.DEFAULT.maxIdleIteration,
        datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}
 
Example 13
Source File: TestBalancerWithHANameNodes.java    From big-c with Apache License 2.0
/**
 * Test a cluster with even distribution, to which a new empty node is then
 * added. The test starts a cluster with the specified number of nodes and
 * fills it to 30% full (with a single file replicated identically to all
 * datanodes); it then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Example 14
Source File: TestBalancerWithMultipleNameNodes.java    From big-c with Apache License 2.0
static void runBalancer(Suite s,
    final long totalUsed, final long totalCapacity) throws Exception {
  final double avg = totalUsed*100.0/totalCapacity;

  LOG.info("BALANCER 0: totalUsed=" + totalUsed
      + ", totalCapacity=" + totalCapacity
      + ", avg=" + avg);
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 1");

  // start rebalancing
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  LOG.info("BALANCER 2");
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 3");

  int i = 0;
  for(boolean balanced = false; !balanced; i++) {
    final long[] used = new long[s.cluster.getDataNodes().size()];
    final long[] cap = new long[used.length];

    for(int n = 0; n < s.clients.length; n++) {
      final DatanodeInfo[] datanodes = s.clients[n].getDatanodeReport(
          DatanodeReportType.ALL);
      Assert.assertEquals(datanodes.length, used.length);

      for(int d = 0; d < datanodes.length; d++) {
        if (n == 0) {
          used[d] = datanodes[d].getDfsUsed();
          cap[d] = datanodes[d].getCapacity();
          if (i % 100 == 0) {
            LOG.warn("datanodes[" + d
                + "]: getDfsUsed()=" + datanodes[d].getDfsUsed()
                + ", getCapacity()=" + datanodes[d].getCapacity());
          }
        } else {
          Assert.assertEquals(used[d], datanodes[d].getDfsUsed());
          Assert.assertEquals(cap[d], datanodes[d].getCapacity());
        }
      }
    }

    balanced = true;
    for(int d = 0; d < used.length; d++) {
      final double p = used[d]*100.0/cap[d];
      balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold;
      if (!balanced) {
        if (i % 100 == 0) {
          LOG.warn("datanodes " + d + " is not yet balanced: "
              + "used=" + used[d] + ", cap=" + cap[d] + ", avg=" + avg);
          LOG.warn("TestBalancer.sum(used)=" + TestBalancer.sum(used)
              + ", TestBalancer.sum(cap)=" + TestBalancer.sum(cap));
        }
        sleep(100);
        break;
      }
    }
  }
  LOG.info("BALANCER 6");
}
 
Example 15
Source File: TestBalancerWithMultipleNameNodes.java    From hadoop with Apache License 2.0
static void runBalancer(Suite s,
    final long totalUsed, final long totalCapacity) throws Exception {
  final double avg = totalUsed*100.0/totalCapacity;

  LOG.info("BALANCER 0: totalUsed=" + totalUsed
      + ", totalCapacity=" + totalCapacity
      + ", avg=" + avg);
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 1");

  // start rebalancing
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  LOG.info("BALANCER 2");
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 3");

  int i = 0;
  for(boolean balanced = false; !balanced; i++) {
    final long[] used = new long[s.cluster.getDataNodes().size()];
    final long[] cap = new long[used.length];

    for(int n = 0; n < s.clients.length; n++) {
      final DatanodeInfo[] datanodes = s.clients[n].getDatanodeReport(
          DatanodeReportType.ALL);
      Assert.assertEquals(datanodes.length, used.length);

      for(int d = 0; d < datanodes.length; d++) {
        if (n == 0) {
          used[d] = datanodes[d].getDfsUsed();
          cap[d] = datanodes[d].getCapacity();
          if (i % 100 == 0) {
            LOG.warn("datanodes[" + d
                + "]: getDfsUsed()=" + datanodes[d].getDfsUsed()
                + ", getCapacity()=" + datanodes[d].getCapacity());
          }
        } else {
          Assert.assertEquals(used[d], datanodes[d].getDfsUsed());
          Assert.assertEquals(cap[d], datanodes[d].getCapacity());
        }
      }
    }

    balanced = true;
    for(int d = 0; d < used.length; d++) {
      final double p = used[d]*100.0/cap[d];
      balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold;
      if (!balanced) {
        if (i % 100 == 0) {
          LOG.warn("datanodes " + d + " is not yet balanced: "
              + "used=" + used[d] + ", cap=" + cap[d] + ", avg=" + avg);
          LOG.warn("TestBalancer.sum(used)=" + TestBalancer.sum(used)
              + ", TestBalancer.sum(cap)=" + TestBalancer.sum(cap));
        }
        sleep(100);
        break;
      }
    }
  }
  LOG.info("BALANCER 6");
}
 
Example 16
Source File: TestBalancer.java    From hadoop with Apache License 2.0
/**
 * Test a special case: two replicas of the same block should not end up on
 * the same node.
 * We have 2 nodes.
 * We have a block in (DN0,SSD) and (DN1,DISK).
 * The replica in (DN0,SSD) should not be moved to (DN1,SSD);
 * otherwise DN1 would hold 2 replicas.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    // set "/bar" directory with ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so
    // (DN0,SSD) and (DN1,DISK) hold two replicas of the same block, and
    // (DN0,DISK) and (DN1,SSD) hold two replicas of the same block.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica in (DN0,SSD) was not moved to (DN1,SSD) because (DN1,DISK)
    // already has one; otherwise DN1 would hold 2 replicas.
    // For the same reason, no replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);

  } finally {
    cluster.shutdown();
  }
}
 
Example 17
Source File: TestBalancer.java    From hadoop with Apache License 2.0
/**
 * Make sure that the balancer can't move pinned blocks.
 * If favoredNodes are specified when a file is created, its blocks are
 * pinned using the sticky bit.
 * @throws Exception
 */
@Test(timeout=100000)
public void testBalancerWithPinnedBlocks() throws Exception {
  // This test assumes the sticky-bit based block pinning mechanism, which is
  // available only on Linux/Unix. It can be unblocked on Windows when
  // HDFS-7759 is ready to provide a different mechanism for Windows.
  assumeTrue(!Path.WINDOWS);

  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
  
  long[] capacities = new long[] { CAPACITY, CAPACITY };
  String[] racks = { RACK0, RACK1 };
  int numOfDatanodes = capacities.length;

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
    .hosts(new String[]{"localhost", "localhost"})
    .racks(racks).simulatedCapacities(capacities).build();

  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    
    // fill up the cluster to be 80% full
    long totalCapacity = sum(capacities);
    long totalUsedSpace = totalCapacity * 8 / 10;
    InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
    for (int i = 0; i < favoredNodes.length; i++) {
      favoredNodes[i] = cluster.getDataNodes().get(i).getXferAddress();
    }

    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
        totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
        (short) numOfDatanodes, 0, false, favoredNodes);
    
    // start up an empty node with the same capacity
    cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 },
        new long[] { CAPACITY });
    
    totalCapacity += CAPACITY;
    
    // run balancer and validate results
    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

    // start rebalancing
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    
  } finally {
    cluster.shutdown();
  }
  
}
 
Example 18
Source File: TestBalancerWithHANameNodes.java    From hadoop with Apache License 2.0
/**
 * Test a cluster with even distribution, to which a new empty node is then
 * added. The test starts a cluster with the specified number of nodes and
 * fills it to 30% full (with a single file replicated identically to all
 * datanodes); it then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestBalancer.java    From big-c with Apache License 2.0
/**
 * Test a special case: two replicas of the same block should not end up on
 * the same node.
 * We have 2 nodes.
 * We have a block in (DN0,SSD) and (DN1,DISK).
 * The replica in (DN0,SSD) should not be moved to (DN1,SSD);
 * otherwise DN1 would hold 2 replicas.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    // set "/bar" directory with ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so
    // (DN0,SSD) and (DN1,DISK) hold two replicas of the same block, and
    // (DN0,DISK) and (DN1,SSD) hold two replicas of the same block.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica in (DN0,SSD) was not moved to (DN1,SSD) because (DN1,DISK)
    // already has one; otherwise DN1 would hold 2 replicas.
    // For the same reason, no replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);

  } finally {
    cluster.shutdown();
  }
}
 
Example 20
Source File: TestBalancer.java    From big-c with Apache License 2.0
@Test(timeout=100000)
public void testUnknownDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  long[] distribution = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100};
  long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[] {RACK0, RACK1, RACK1};

  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate the total space that needs to be filled
  final long totalUsedSpace = sum(distribution);

  // fill the cluster
  ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
      (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short)(numDatanodes-1), distribution);

  // restart the cluster: do NOT format the cluster
  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .format(false)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    for(int i = 0; i < 3; i++) {
      cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
    }

    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK0}, null, new long[]{CAPACITY});
    cluster.triggerHeartbeats();

    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Set<String> datanodes = new HashSet<String>();
    datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        Balancer.Parameters.DEFAULT.maxIdleIteration,
        datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}