Java Code Examples for org.apache.hadoop.hbase.MiniHBaseCluster#stopRegionServer()

The following examples show how to use org.apache.hadoop.hbase.MiniHBaseCluster#stopRegionServer(). Each example comes from an open source project; the source file and license are noted above each snippet.
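
Before diving into the examples, a quick orientation: MiniHBaseCluster#stopRegionServer() is overloaded to accept either the index of a regionserver in the mini cluster or its ServerName, and tests usually pair it with a wait so assertions run only after the server is actually down. The snippet below is a minimal sketch, assuming a test already holds an HBaseTestingUtility named TEST_UTIL with a running mini cluster; it is not taken from any of the projects quoted here.

// Minimal sketch; TEST_UTIL is assumed to be an HBaseTestingUtility with a running mini cluster.
MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();

// Stop the regionserver at index 0 and wait for its thread to exit.
cluster.stopRegionServer(0);
cluster.waitOnRegionServer(0);

// Or stop a regionserver by its ServerName (as Examples 2-5 do).
ServerName victim = cluster.getRegionServer(1).getServerName();
cluster.stopRegionServer(victim);
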
Example 1
Source File: TestReplicationChangingPeerRegionservers.java    From hbase with Apache License 2.0
@Test
public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException {
  LOG.info("testSimplePutDelete");
  MiniHBaseCluster peerCluster = UTIL2.getMiniHBaseCluster();
  // This test wants two RS's up. We only run one generally so add one.
  peerCluster.startRegionServer();
  Waiter.waitFor(peerCluster.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {
    @Override public boolean evaluate() throws Exception {
      return peerCluster.getLiveRegionServerThreads().size() > 1;
    }
  });
  int numRS = peerCluster.getRegionServerThreads().size();

  doPutTest(Bytes.toBytes(1));

  int rsToStop = peerCluster.getServerWithMeta() == 0 ? 1 : 0;
  peerCluster.stopRegionServer(rsToStop);
  peerCluster.waitOnRegionServer(rsToStop);

  // Sanity check
  assertEquals(numRS - 1, peerCluster.getRegionServerThreads().size());

  doPutTest(Bytes.toBytes(2));

  peerCluster.startRegionServer();

  // Sanity check
  assertEquals(numRS, peerCluster.getRegionServerThreads().size());

  doPutTest(Bytes.toBytes(3));
}
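
The anonymous Waiter.Predicate above is a common pattern in these tests; because Waiter.Predicate declares a single evaluate() method, the same wait can usually be written as a lambda. A small sketch of the equivalent call, using the same peerCluster and timeout as in Example 1:

// Equivalent wait expressed as a lambda over the same peerCluster.
Waiter.waitFor(peerCluster.getConfiguration(), 30000,
    () -> peerCluster.getLiveRegionServerThreads().size() > 1);
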
 
Example 2
Source File: TestWALRecoveryCaching.java    From phoenix with Apache License 2.0
/**
 * Stops and restarts regionservers until the index and primary tables share a server.
 * @param cluster mini cluster hosting both tables
 * @param indexTable name of the index table
 * @param primaryTable name of the primary data table
 * @return the server hosting regions of both tables
 */
private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] indexTable,
    byte[] primaryTable) throws Exception {

  ServerName shared = getSharedServer(cluster, indexTable, primaryTable);
  boolean tryIndex = true;
  while (shared == null) {

    // start killing servers until we get an overlap
    Set<ServerName> servers;
    byte[] table = null;
    // switch which server we kill each time to get region movement
    if (tryIndex) {
      table = indexTable;
    } else {
      table = primaryTable;
    }
    servers = getServersForTable(cluster, table);
    tryIndex = !tryIndex;
    for (ServerName server : servers) {
      // find the regionserver that matches the passed server
      List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);

      LOG.info("Shutting down and reassigning regions from " + server);
      cluster.stopRegionServer(server);
      cluster.waitForRegionServerToStop(server, TIMEOUT);

      // force reassign the regions from the table
      for (HRegion region : online) {
        cluster.getMaster().assignRegion(region.getRegionInfo());
      }

      LOG.info("Starting region server:" + server.getHostname());
      cluster.startRegionServer(server.getHostname());

      cluster.waitForRegionServerToStart(server.getHostname(), TIMEOUT);

      // start a server to get back to the base number of servers
      LOG.info("STarting server to replace " + server);
      cluster.startRegionServer();
      break;
    }

    shared = getSharedServer(cluster, indexTable, primaryTable);
  }
  return shared;
}
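
One detail the helper above discards is the identity of the replacement regionserver: MiniHBaseCluster#startRegionServer() returns a JVMClusterUtil.RegionServerThread (Example 3 below relies on this), so a test that needs to track the new server can capture its ServerName. A short sketch, not part of the original helper:

// Hedged sketch: capture the ServerName of a freshly started replacement regionserver.
JVMClusterUtil.RegionServerThread replacement = cluster.startRegionServer();
HRegionServer newServer = replacement.getRegionServer();
newServer.waitForServerOnline();
LOG.info("Replacement regionserver is " + newServer.getServerName());
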
 
Example 3
Source File: TestReplicationSource.java    From hbase with Apache License 2.0
/**
 * Tests that recovered queues are preserved on a regionserver shutdown.
 * See HBASE-18192
 */
@Test
public void testServerShutdownRecoveredQueue() throws Exception {
  try {
    // Ensure single-threaded WAL
    conf.set("hbase.wal.provider", "defaultProvider");
    conf.setInt("replication.sleep.before.failover", 2000);
    // Introduces a delay in regionserver shutdown to give the race condition a chance to kick in.
    conf.set(HConstants.REGION_SERVER_IMPL, ShutdownDelayRegionServer.class.getName());
    MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(2);
    TEST_UTIL_PEER.startMiniCluster(1);

    HRegionServer serverA = cluster.getRegionServer(0);
    final ReplicationSourceManager managerA =
        ((Replication) serverA.getReplicationSourceService()).getReplicationManager();
    HRegionServer serverB = cluster.getRegionServer(1);
    final ReplicationSourceManager managerB =
        ((Replication) serverB.getReplicationSourceService()).getReplicationManager();
    final Admin admin = TEST_UTIL.getAdmin();

    final String peerId = "TestPeer";
    admin.addReplicationPeer(peerId,
      ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL_PEER.getClusterKey()).build());
    // Wait for replication sources to come up
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return !(managerA.getSources().isEmpty() || managerB.getSources().isEmpty());
      }
    });
    // Disabling the peer makes sure there is at least one log to claim when the server dies.
    // The recovered queue will also stay there until the peer is re-enabled, even if the
    // WALs it contains have no data.
    admin.disableReplicationPeer(peerId);

    // Stopping serverA.
    // Its queues should be claimed by the only other live server, i.e. serverB.
    cluster.stopRegionServer(serverA.getServerName());
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerB.getOldSources().size() == 1;
      }
    });

    final HRegionServer serverC = cluster.startRegionServer().getRegionServer();
    serverC.waitForServerOnline();
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return serverC.getReplicationSourceService() != null;
      }
    });
    final ReplicationSourceManager managerC =
        ((Replication) serverC.getReplicationSourceService()).getReplicationManager();
    // Sanity check
    assertEquals(0, managerC.getOldSources().size());

    // Stopping serverB.
    // Now serverC should have two recovered queues:
    // 1. serverB's normal queue
    // 2. serverA's recovered queue on serverB
    cluster.stopRegionServer(serverB.getServerName());
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerC.getOldSources().size() == 2;
      }
    });
    admin.enableReplicationPeer(peerId);
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerC.getOldSources().size() == 0;
      }
    });
  } finally {
    conf.set(HConstants.REGION_SERVER_IMPL, HRegionServer.class.getName());
  }
}
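
The test swaps in a ShutdownDelayRegionServer via HConstants.REGION_SERVER_IMPL, but its definition is not part of this snippet; it lives elsewhere in TestReplicationSource. As a hedged illustration of the idea only, a delaying regionserver might pause in the shutdown path roughly like this (the actual class may hook a different method):

// Illustrative sketch only; the real ShutdownDelayRegionServer in TestReplicationSource may differ.
public static class ShutdownDelayRegionServer extends HRegionServer {
  public ShutdownDelayRegionServer(Configuration conf) throws IOException {
    super(conf);
  }

  @Override
  protected void stopServiceThreads() {
    // Pause before service threads shut down to widen the window for the failover race.
    try {
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    super.stopServiceThreads();
  }
}
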
 
Example 4
Source File: TestWALRecoveryCaching.java    From phoenix with Apache License 2.0
/**
 * Stops and restarts regionservers until the index and primary tables share a server.
 * @param cluster mini cluster hosting both tables
 * @param indexTable name of the index table
 * @param primaryTable name of the primary data table
 * @return the server hosting regions of both tables
 */
private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] indexTable,
    byte[] primaryTable) throws Exception {

  ServerName shared = getSharedServer(cluster, indexTable, primaryTable);
  boolean tryIndex = true;
  while (shared == null) {

    // start killing servers until we get an overlap
    Set<ServerName> servers;
    byte[] table = null;
    // switch which server we kill each time to get region movement
    if (tryIndex) {
      table = indexTable;
    } else {
      table = primaryTable;
    }
    servers = getServersForTable(cluster, table);
    tryIndex = !tryIndex;
    for (ServerName server : servers) {
      // find the regionserver that matches the passed server
      List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);

      LOGGER.info("Shutting down and reassigning regions from " + server);
      cluster.stopRegionServer(server);
      cluster.waitForRegionServerToStop(server, TIMEOUT);

      // force reassign the regions from the table
      for (Region region : online) {
        cluster.getMaster().getAssignmentManager().assign(region.getRegionInfo());
      }

      LOGGER.info("Starting region server:" + server.getHostname());
      cluster.startRegionServer(server.getHostname(), server.getPort());

      cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);

      // start a server to get back to the base number of servers
      LOGGER.info("STarting server to replace " + server);
      cluster.startRegionServer();
      break;
    }

    shared = getSharedServer(cluster, indexTable, primaryTable);
  }
  return shared;
}
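
For completeness, a caller of this helper would typically pass the raw table-name bytes and assert that a shared server was actually found before killing it. A hypothetical invocation (the table names and the util variable are placeholders, not taken from the original test):

// Hypothetical usage; DATA_TABLE, INDEX_TABLE and util are placeholders.
byte[] primaryTable = Bytes.toBytes("DATA_TABLE");
byte[] indexTable = Bytes.toBytes("INDEX_TABLE");
ServerName shared = ensureTablesLiveOnSameServer(util.getMiniHBaseCluster(), indexTable, primaryTable);
assertNotNull("Tables never ended up on the same regionserver", shared);
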
 
Example 5
Source File: TestWALRecoveryCaching.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Stops and restarts regionservers until the index and primary tables share a server.
 * @param cluster mini cluster hosting both tables
 * @param indexTable name of the index table
 * @param primaryTable name of the primary data table
 * @return the server hosting regions of both tables
 */
private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] indexTable,
    byte[] primaryTable) throws Exception {

  ServerName shared = getSharedServer(cluster, indexTable, primaryTable);
  boolean tryIndex = true;
  while (shared == null) {

    // start killing servers until we get an overlap
    Set<ServerName> servers;
    byte[] table = null;
    // switch which server we kill each time to get region movement
    if (tryIndex) {
      table = indexTable;
    } else {
      table = primaryTable;
    }
    servers = getServersForTable(cluster, table);
    tryIndex = !tryIndex;
    for (ServerName server : servers) {
      // find the regionserver that matches the passed server
      List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);

      LOG.info("Shutting down and reassigning regions from " + server);
      cluster.stopRegionServer(server);
      cluster.waitForRegionServerToStop(server, TIMEOUT);

      // force reassign the regions from the table
      for (HRegion region : online) {
        cluster.getMaster().assign(region.getRegionName());
      }

      LOG.info("Starting region server:" + server.getHostname());
      cluster.startRegionServer(server.getHostname());

      cluster.waitForRegionServerToStart(server.getHostname(), TIMEOUT);

      // start a server to get back to the base number of servers
      LOG.info("STarting server to replace " + server);
      cluster.startRegionServer();
      break;
    }

    shared = getSharedServer(cluster, indexTable, primaryTable);
  }
  return shared;
}