Java Code Examples for org.apache.hadoop.hbase.util.Threads

The following examples show how to use org.apache.hadoop.hbase.util.Threads. These examples are extracted from open source projects; the source project, source file, and license are listed above each example.
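Before the individual examples, here is a minimal, self-contained sketch that strings together the Threads helpers that recur below: sleep, sleepWithoutInterrupt, newDaemonThreadFactory, setDaemonThreadRunning, and shutdown. The class name and thread names are illustrative only; the Threads calls themselves mirror the usages in the examples that follow.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.hbase.util.Threads;

public class ThreadsUsageSketch {
  public static void main(String[] args) {
    // Pause the current thread. Threads.sleep re-interrupts the thread if the sleep is
    // interrupted, while sleepWithoutInterrupt sleeps the full duration and re-asserts the
    // interrupt flag afterwards.
    Threads.sleep(100);
    Threads.sleepWithoutInterrupt(100);

    // Build an executor whose workers are named daemon threads, as several examples below do.
    ExecutorService pool = Executors.newSingleThreadExecutor(
        Threads.newDaemonThreadFactory("threads-usage-sketch"));
    pool.submit(() -> System.out.println("ran on " + Thread.currentThread().getName()));
    pool.shutdown();

    // Start a named daemon thread, then join it with a bounded wait (milliseconds).
    Thread worker = new Thread(() -> Threads.sleepWithoutInterrupt(500));
    Threads.setDaemonThreadRunning(worker, "threads-usage-sketch-worker");
    Threads.shutdown(worker, 30000);
  }
}
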
Example 1
Source Project: hbase   Source File: TestCompactingToCellFlatMapMemStore.java    License: Apache License 2.0
private long addRowsByKeysDataSize(final AbstractMemStore hmc, String[] keys) {
  byte[] fam = Bytes.toBytes("testfamily");
  byte[] qf = Bytes.toBytes("testqualifier");
  MemStoreSizing memstoreSizing = new NonThreadSafeMemStoreSizing();
  for (int i = 0; i < keys.length; i++) {
    long timestamp = System.currentTimeMillis();
    Threads.sleep(1); // to make sure each kv gets a different ts
    byte[] row = Bytes.toBytes(keys[i]);
    byte[] val = Bytes.toBytes(keys[i] + i);
    KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
    hmc.add(kv, memstoreSizing);
    LOG.debug("added kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp());
  }
  MemStoreSize mss = memstoreSizing.getMemStoreSize();
  regionServicesForStores.addMemStoreSize(mss.getDataSize(), mss.getHeapSize(),
    mss.getOffHeapSize(), mss.getCellsCount());
  return mss.getDataSize();
}
 
Example 2
Source Project: hbase   Source File: RSGroupInfoManagerImpl.java    License: Apache License 2.0
private boolean waitForGroupTableOnline() {
  while (isMasterRunning(masterServices)) {
    try {
      TableStateManager tsm = masterServices.getTableStateManager();
      if (!tsm.isTablePresent(RSGROUP_TABLE_NAME)) {
        createRSGroupTable();
      }
      // try reading from the table
      FutureUtils.get(conn.getTable(RSGROUP_TABLE_NAME).get(new Get(ROW_KEY)));
      LOG.info("RSGroup table={} is online, refreshing cached information", RSGROUP_TABLE_NAME);
      RSGroupInfoManagerImpl.this.refresh(true);
      online = true;
      // flush any inconsistencies between ZK and HTable
      RSGroupInfoManagerImpl.this.flushConfig();
      // migrate after we are online.
      migrate();
      return true;
    } catch (Exception e) {
      LOG.warn("Failed to perform check", e);
      // 100ms is short so let's just ignore the interrupt
      Threads.sleepWithoutInterrupt(100);
    }
  }
  return false;
}
 
Example 3
Source Project: hbase   Source File: TestCellACLs.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  // Create the test table (owner added to the _acl_ table)
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(testTable.getTableName());
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(TEST_FAMILY);
  familyDescriptor.setMaxVersions(4);
  tableDescriptor.setOwner(USER_OWNER);
  tableDescriptor.setColumnFamily(familyDescriptor);
  admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") });
  TEST_UTIL.waitTableEnabled(testTable.getTableName());
  LOG.info("Sleeping a second because of HBASE-12581");
  Threads.sleep(1000);
}
 
Example 4
Source Project: hbase   Source File: MiniHBaseCluster.java    License: Apache License 2.0
@Override
public void run() {
  try {
    this.user.runAs(new PrivilegedAction<Object>() {
      @Override
      public Object run() {
        runRegionServer();
        return null;
      }
    });
  } catch (Throwable t) {
    LOG.error("Exception in run", t);
  } finally {
    // Run this on the way out.
    if (this.shutdownThread != null) {
      this.shutdownThread.start();
      Threads.shutdown(this.shutdownThread, 30000);
    }
  }
}
 
Example 5
Source Project: hbase   Source File: MiniHBaseCluster.java    License: Apache License 2.0
/**
 * Starts a region server thread and waits until it is processed by the master. Throws an
 * exception when it cannot start a region server or when the region server is not processed
 * by the master within the timeout.
 *
 * @return New RegionServerThread
 */
public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
    throws IOException {

  JVMClusterUtil.RegionServerThread t =  startRegionServer();
  ServerName rsServerName = t.getRegionServer().getServerName();

  long start = System.currentTimeMillis();
  ClusterMetrics clusterStatus = getClusterMetrics();
  while ((System.currentTimeMillis() - start) < timeout) {
    if (clusterStatus != null && clusterStatus.getLiveServerMetrics().containsKey(rsServerName)) {
      return t;
    }
    Threads.sleep(100);
  }
  if (t.getRegionServer().isOnline()) {
    throw new IOException("RS: " + rsServerName + " online, but not processed by master");
  } else {
    throw new IOException("RS: " + rsServerName + " is offline");
  }
}
 
Example 6
Source Project: hbase   Source File: MiniHBaseCluster.java    License: Apache License 2.0
/**
 * Blocks until there is an active master and that master has completed
 * initialization.
 *
 * @return true if an active master becomes available; false if there are no
 *         masters left.
 */
@Override
public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
  List<JVMClusterUtil.MasterThread> mts;
  long start = System.currentTimeMillis();
  while (!(mts = getMasterThreads()).isEmpty()
      && (System.currentTimeMillis() - start) < timeout) {
    for (JVMClusterUtil.MasterThread mt : mts) {
      if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
        return true;
      }
    }

    Threads.sleep(100);
  }
  return false;
}
 
Example 7
@Override
public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
    InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
  if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
    int concurrency = CONCURRENCY.incrementAndGet();
    for (;;) {
      int max = MAX_CONCURRENCY.get();
      if (concurrency <= max) {
        break;
      }
      if (MAX_CONCURRENCY.compareAndSet(max, concurrency)) {
        break;
      }
    }
    Threads.sleepWithoutInterrupt(10);
  }
  return hasNext;
}
 
Example 8
Source Project: hbase   Source File: TestCompactingToCellFlatMapMemStore.java    License: Apache License 2.0
@Test
public void testCountOfCellsAfterFlatteningByIterator() throws IOException {
  String[] keys1 = { "A", "B", "C" }; // A, B, C
  addRowsByKeysWith50Cols(memstore, keys1);
  // this should only flatten as there are no duplicates
  ((CompactingMemStore) memstore).flushInMemory();
  while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
    Threads.sleep(10);
  }
  // Just doing the cnt operation here
  MemStoreSegmentsIterator itr = new MemStoreMergerSegmentsIterator(
      ((CompactingMemStore) memstore).getImmutableSegments().getStoreSegments(),
      CellComparatorImpl.COMPARATOR, 10);
  int cnt = 0;
  try {
    while (itr.next() != null) {
      cnt++;
    }
  } finally {
    itr.close();
  }
  assertEquals("the count should be ", 150, cnt);
}
 
Example 9
Source Project: hbase   Source File: TestProcedureSuspended.java    License: Apache License 2.0
@Test
public void testYieldWhileHoldingLocks() {
  final AtomicBoolean lock = new AtomicBoolean(false);

  final TestLockProcedure p1 = new TestLockProcedure(lock, "key", true, false);
  final TestLockProcedure p2 = new TestLockProcedure(lock, "key", true, false);

  procExecutor.submitProcedure(p1);
  procExecutor.submitProcedure(p2);

  // try to execute a bunch of yield on p1, p2 should be blocked
  while (p1.getTimestamps().size() < 100) {
    Threads.sleep(10);
  }

  assertEquals(0, p2.getTimestamps().size());

  // wait until p1 is completed
  p1.setThrowYield(false);
  ProcedureTestingUtility.waitProcedure(procExecutor, p1);

  // try to execute a bunch of yield on p2
  while (p2.getTimestamps().size() < 100) {
    Threads.sleep(10);
  }

  assertEquals(p1.getTimestamps().get(p1.getTimestamps().size() - 1).longValue() + 1,
    p2.getTimestamps().get(0).longValue());

  // wait until p2 is completed
  p2.setThrowYield(false);
  ProcedureTestingUtility.waitProcedure(procExecutor, p2);
}
 
Example 10
Source Project: hbase-operator-tools   Source File: TestHBCK2.java    License: Apache License 2.0
private void waitOnPids(List<Long> pids) {
  for (Long pid: pids) {
    while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().
        isFinished(pid)) {
      Threads.sleep(100);
    }
  }
}
 
Example 11
Source Project: hbase   Source File: TestAsyncWALReplay.java    License: Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  GROUP = new NioEventLoopGroup(1, Threads.newDaemonThreadFactory("TestAsyncWALReplay"));
  CHANNEL_CLASS = NioSocketChannel.class;
  Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration();
  conf.set(WALFactory.WAL_PROVIDER, "asyncfs");
  AbstractTestWALReplay.setUpBeforeClass();
}
 
Example 12
Source Project: hbase   Source File: TestAsyncTableNoncedRetry.java    License: Apache License 2.0
@Override
public Result postIncrement(ObserverContext<RegionCoprocessorEnvironment> c,
    Increment increment, Result result) throws IOException {
  if (CALLED.getAndIncrement() == 0) {
    Threads.sleepWithoutInterrupt(SLEEP_TIME);
  }
  return RegionObserver.super.postIncrement(c, increment, result);
}
 
Example 13
Source Project: kylin   Source File: HBaseConnection.java    License: Apache License 2.0
public static ExecutorService getCoprocessorPool() {
    if (coprocessorPool != null) {
        return coprocessorPool;
    }

    synchronized (HBaseConnection.class) {
        if (coprocessorPool != null) {
            return coprocessorPool;
        }

        KylinConfig config = KylinConfig.getInstanceFromEnv();

        // copy from HConnectionImplementation.getBatchPool()
        int maxThreads = config.getHBaseMaxConnectionThreads();
        int coreThreads = config.getHBaseCoreConnectionThreads();
        long keepAliveTime = config.getHBaseConnectionThreadPoolAliveSeconds();
        LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * 100);
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, //
                Threads.newDaemonThreadFactory("kylin-coproc-"));
        tpe.allowCoreThreadTimeOut(true);

        logger.info("Creating coprocessor thread pool with max of {}, core of {}", maxThreads, coreThreads);

        coprocessorPool = tpe;
        return coprocessorPool;
    }
}
 
Example 14
Source Project: hbase   Source File: LogRollBackupSubprocedurePool.java    License: Apache License 2.0
public LogRollBackupSubprocedurePool(String name, Configuration conf) {
  // configure the executor service
  long keepAlive =
      conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
        LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
  this.name = name;
  executor =
      new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS,
          new LinkedBlockingQueue<>(),
          Threads.newDaemonThreadFactory("rs(" + name + ")-backup"));
  taskPool = new ExecutorCompletionService<>(executor);
}
 
Example 15
Source Project: hbase   Source File: TestCatalogJanitorInMemoryStates.java    License: Apache License 2.0
private PairOfSameType<RegionInfo> waitOnDaughters(final RegionInfo r)
    throws IOException {
  long start = System.currentTimeMillis();
  PairOfSameType<RegionInfo> pair = null;
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
       Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
    Result result = null;
    RegionInfo region = null;
    while ((System.currentTimeMillis() - start) < 60000) {
      result = metaTable.get(new Get(r.getRegionName()));
      if (result == null) {
        break;
      }
      region = CatalogFamilyFormat.getRegionInfo(result);
      if (region.isSplitParent()) {
        LOG.debug(region.toString() + " IS a parent!");
        pair = MetaTableAccessor.getDaughterRegions(result);
        break;
      }
      Threads.sleep(100);
    }

    if (pair == null || pair.getFirst() == null || pair.getSecond() == null) {
      throw new IOException("Failed to get daughters, for parent region: " + r);
    }
    return pair;
  }
}
 
Example 16
Source Project: hbase   Source File: TestLoadAndSwitchEncodeOnDisk.java    License: Apache License 2.0
@Override
@Test
public void loadTest() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();

  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();

  ColumnFamilyDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  Table t = TEST_UTIL.getConnection().getTable(TABLE);
  assertAllOnLine(t);

  admin.disableTable(TABLE);
  admin.modifyColumnFamily(TABLE, hcd);

  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);

  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");

  // The table may not have all regions on line yet.  Assert online before
  // moving to major compact.
  assertAllOnLine(t);

  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }

  System.err.println("\nDone with the test, shutting down the cluster\n");
}
 
Example 17
Source Project: hbase   Source File: TestRSKilledWhenInitializing.java    License: Apache License 2.0
@Override
protected void handleReportForDutyResponse(RegionServerStartupResponse c)
throws IOException {
  if (killedRS.compareAndSet(null, getServerName())) {
    // Make sure Master is up so it will see the removal of the ephemeral znode for this RS.
    while (!masterActive.get()) {
      Threads.sleep(100);
    }
    super.kill();
  } else {
    super.handleReportForDutyResponse(c);
  }
}
 
Example 18
Source Project: hbase   Source File: TestProcedureExecutor.java    License: Apache License 2.0
private int waitThreadCount(final int expectedThreads) {
  while (procExecutor.isRunning()) {
    if (procExecutor.getWorkerThreadCount() == expectedThreads) {
      break;
    }
    LOG.debug("waiting for thread count=" + expectedThreads +
      " current=" + procExecutor.getWorkerThreadCount());
    Threads.sleepWithoutInterrupt(250);
  }
  return procExecutor.getWorkerThreadCount();
}
 
Example 19
Source Project: hbase   Source File: ZKPermissionWatcher.java    License: Apache License 2.0
public ZKPermissionWatcher(ZKWatcher watcher,
    AuthManager authManager, Configuration conf) {
  super(watcher);
  this.authManager = authManager;
  String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
  this.aclZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, aclZnodeParent);
  executor = Executors.newSingleThreadExecutor(
      Threads.newDaemonThreadFactory("zk-permission-watcher"));
}
 
Example 20
Source Project: hbase   Source File: TaskMonitor.java    License: Apache License 2.0
TaskMonitor(Configuration conf) {
  maxTasks = conf.getInt(MAX_TASKS_KEY, DEFAULT_MAX_TASKS);
  expirationTime = conf.getLong(EXPIRATION_TIME_KEY, DEFAULT_EXPIRATION_TIME);
  rpcWarnTime = conf.getLong(RPC_WARN_TIME_KEY, DEFAULT_RPC_WARN_TIME);
  tasks = new CircularFifoQueue(maxTasks);
  rpcTasks = Lists.newArrayList();
  monitorInterval = conf.getLong(MONITOR_INTERVAL_KEY, DEFAULT_MONITOR_INTERVAL);
  monitorThread = new Thread(new MonitorRunnable());
  Threads.setDaemonThreadRunning(monitorThread, "Monitor thread for TaskMonitor");
}
 
Example 21
Source Project: hbase   Source File: TestOpenTableInCoprocessor.java    License: Apache License 2.0
/**
 * @return a pool that has at most one thread at any time. A second action added to the pool
 *         (to run concurrently) will cause an exception.
 */
private ExecutorService getPool() {
  int maxThreads = 1;
  long keepAliveTime = 60;
  ThreadPoolExecutor pool =
      new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
          new SynchronousQueue<>(), Threads.newDaemonThreadFactory("hbase-table"));
  pool.allowCoreThreadTimeOut(true);
  return pool;
}
 
Example 22
Source Project: hbase   Source File: EntityLock.java    License: Apache License 2.0
public void unlock() throws IOException {
  Threads.shutdown(worker.shutdown());
  try {
    stub.lockHeartbeat(null,
      LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build());
  } catch (Exception e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
 
Example 23
Source Project: hbase   Source File: TestRegionObserverInterface.java    License: Apache License 2.0
@Test
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + "." + name.getMethodName());
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  Table table = util.createTable(tableName, new byte[][] { A, B, C });

  try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();

    util.getAdmin().move(Bytes.toBytes(regEN), sn2);
    while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    put.addColumn(B, B, B);
    put.addColumn(C, C, C);
    table.put(put);

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);
  }

  verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "getCtPreWALRestore", "getCtPostWALRestore", }, tableName,
    new Integer[] { 0, 0 });

  util.deleteTable(tableName);
  table.close();
}
 
Example 24
Source Project: hbase   Source File: TwoConcurrentActionPolicy.java    License: Apache License 2.0
public TwoConcurrentActionPolicy(long sleepTime, Action[] actionsOne, Action[] actionsTwo) {
  super(sleepTime);
  this.actionsOne = actionsOne;
  this.actionsTwo = actionsTwo;
  executor = Executors.newFixedThreadPool(2,
      Threads.newDaemonThreadFactory("TwoConcurrentAction"));
}
 
Example 25
Source Project: hbase   Source File: ZKUtil.java    License: Apache License 2.0
/**
 * Waits for HBase installation's base (parent) znode to become available.
 * @throws IOException on ZK errors
 */
public static void waitForBaseZNode(Configuration conf) throws IOException {
  LOG.info("Waiting until the base znode is available");
  String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
      HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf),
      conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
      HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance);

  final int maxTimeMs = 10000;
  final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;

  KeeperException keeperEx = null;
  try {
    try {
      for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
        try {
          if (zk.exists(parentZNode, false) != null) {
            LOG.info("Parent znode exists: {}", parentZNode);
            keeperEx = null;
            break;
          }
        } catch (KeeperException e) {
          keeperEx = e;
        }
        Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
      }
    } finally {
      zk.close();
    }
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  if (keeperEx != null) {
    throw new IOException(keeperEx);
  }
}
 
Example 26
Source Project: hbase   Source File: HMaster.java    License: Apache License 2.0
@Override
public void run() {
  try {
    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
      Threads.setDaemonThreadRunning(new Thread(() -> {
        try {
          int infoPort = putUpJettyServer();
          startActiveMasterManager(infoPort);
        } catch (Throwable t) {
          // Make sure we log the exception.
          String error = "Failed to become Active Master";
          LOG.error(error, t);
          // Abort should have been called already.
          if (!isAborted()) {
            abort(error, t);
          }
        }
      }), getName() + ":becomeActiveMaster");
    }
    // Fall in here even if we have been aborted. Need to run the shutdown services and
    // the super run call will do this for us.
    super.run();
  } finally {
    if (this.clusterSchemaService != null) {
      // If on way out, then we are no longer active master.
      this.clusterSchemaService.stopAsync();
      try {
        this.clusterSchemaService.awaitTerminated(
            getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
            DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
      } catch (TimeoutException te) {
        LOG.warn("Failed shutdown of clusterSchemaService", te);
      }
    }
    this.activeMaster = false;
  }
}
 
Example 27
Source Project: hbase   Source File: TestTableStateManager.java    License: Apache License 2.0
@Test
public void testMigration() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY_STR);
  TEST_UTIL.getAdmin().disableTable(tableName);
  // Table is disabled. Now remove the DISABLED column from the hbase:meta for this table's
  // region. We want to see if Master will read the DISABLED from zk and make use of it as
  // though it were reading the zk table state written by a hbase-1.x cluster.
  TableState state = MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), tableName);
  assertTrue("State=" + state, state.getState().equals(TableState.State.DISABLED));
  MetaTableAccessor.deleteTableState(TEST_UTIL.getConnection(), tableName);
  assertTrue(MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), tableName) == null);
  // Now kill Master so a new one can come up and run through the zk migration.
  HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
  master.stop("Restarting");
  while (!master.isStopped()) {
    Threads.sleep(1);
  }
  assertTrue(master.isStopped());
  JVMClusterUtil.MasterThread newMasterThread = TEST_UTIL.getMiniHBaseCluster().startMaster();
  master = newMasterThread.getMaster();
  while (!master.isInitialized()) {
    Threads.sleep(1);
  }
  assertTrue(MetaTableAccessor.getTableState(TEST_UTIL.getConnection(),
      tableName).getState().equals(TableState.State.DISABLED));
}
 
Example 28
Source Project: hbase   Source File: TestRegionServerMetrics.java    License: Apache License 2.0
@BeforeClass
public static void startCluster() throws Exception {
  metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  TEST_UTIL = new HBaseTestingUtility();
  TABLES_ON_MASTER = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
  conf = TEST_UTIL.getConfiguration();
  conf.getLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  // testMobMetrics creates few hfiles and manages compaction manually.
  conf.setInt("hbase.hstore.compactionThreshold", 100);
  conf.setInt("hbase.hstore.compaction.max", 100);
  conf.setInt("hbase.regionserver.periodicmemstoreflusher.rangeofdelayseconds", 4*60);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);

  TEST_UTIL.startMiniCluster();
  cluster = TEST_UTIL.getHBaseCluster();
  cluster.waitForActiveAndReadyMaster();
  admin = TEST_UTIL.getAdmin();
  connection = TEST_UTIL.getConnection();

  while (cluster.getLiveRegionServerThreads().isEmpty() &&
      cluster.getRegionServer(0) == null &&
      rs.getMetrics() == null) {
    Threads.sleep(100);
  }
  rs = cluster.getRegionServer(0);
  metricsRegionServer = rs.getMetrics();
  serverSource = metricsRegionServer.getMetricsSource();
}
 
Example 29
Source Project: hbase   Source File: TestServerBusyException.java    License: Apache License 2.0
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Get get, final List<Cell> results) throws IOException {
  // After first sleep, all requests are timeout except the last retry. If we handle
  // all the following requests, finally the last request is also timeout. If we drop all
  // timeout requests, we can handle the last request immediately and it will not timeout.
  if (ct.incrementAndGet() <= 1) {
    Threads.sleep(SLEEP_TIME * RPC_RETRY * 2);
  } else {
    Threads.sleep(SLEEP_TIME);
  }
}
 
Example 30
Source Project: hbase   Source File: ShutdownHook.java    License: Apache License 2.0
@Override
public void run() {
  boolean b = this.conf.getBoolean(RUN_SHUTDOWN_HOOK, true);
  LOG.info("Shutdown hook starting; " + RUN_SHUTDOWN_HOOK + "=" + b +
    "; fsShutdownHook=" + this.fsShutdownHook);
  if (b) {
    this.stop.stop("Shutdown hook");
    Threads.shutdown(this.threadToJoin);
    if (this.fsShutdownHook != null) {
      synchronized (fsShutdownHooks) {
        int refs = fsShutdownHooks.get(fsShutdownHook);
        if (refs == 1) {
          LOG.info("Starting fs shutdown hook thread.");
          Thread fsShutdownHookThread = (fsShutdownHook instanceof Thread) ?
            (Thread)fsShutdownHook : new Thread(fsShutdownHook,
              fsShutdownHook.getClass().getSimpleName() + "-shutdown-hook");
          fsShutdownHookThread.start();
          Threads.shutdown(fsShutdownHookThread,
            this.conf.getLong(FS_SHUTDOWN_HOOK_WAIT, 30000));
        }
        if (refs > 0) {
          fsShutdownHooks.put(fsShutdownHook, refs - 1);
        }
      }
    }
  }
  LOG.info("Shutdown hook finished.");
}