org.apache.hadoop.hbase.client.ClusterConnection Java Examples

The following examples show how to use org.apache.hadoop.hbase.client.ClusterConnection. Each example is taken from an open source project; the source file, originating project, and license are noted above each snippet.
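Most of the examples below share the same basic pattern: a ClusterConnection is obtained by casting the Connection returned by ConnectionFactory, and its extended API (for example locateRegions() or clearRegionCache()) is then used directly, with the connection closed when done. The minimal sketch below illustrates only that pattern; the table name and LOG field are hypothetical and the snippet is not taken from any of the projects listed.

// Minimal illustrative sketch (not from the projects below). Assumes an existing
// Configuration and an SLF4J LOG field; "my_table" is a made-up table name.
static void clusterConnectionSketch(Configuration conf) throws IOException {
  try (ClusterConnection conn =
      (ClusterConnection) ConnectionFactory.createConnection(conf)) {
    TableName table = TableName.valueOf("my_table");
    // locateRegions() and clearRegionCache() are ClusterConnection-specific calls.
    List<HRegionLocation> locations = conn.locateRegions(table);
    LOG.info("{} has {} region(s)", table, locations.size());
    conn.clearRegionCache(table);
  }
}
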
Example #1
Source File: HBCK2.java    From hbase-operator-tools with Apache License 2.0
/**
 * Check for HBCK support.
 * Expects an already-created connection.
 * @param connection connection to the cluster being checked.
 * @param cmd name of the command being run; used in log and error messages.
 * @param supportedVersions list of zero or more supported versions; when empty,
 *   the default minimum HBCK2 versions are used.
 */
void checkHBCKSupport(ClusterConnection connection, String cmd, String ... supportedVersions)
    throws IOException {
  if (skipCheck) {
    LOG.info("Skipped {} command version check; 'skip' set", cmd);
    return;
  }
  try (Admin admin = connection.getAdmin()) {
    String serverVersion = admin
        .getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION)).getHBaseVersion();
    String[] thresholdVersions = supportedVersions == null || supportedVersions.length == 0
        ? MINIMUM_HBCK2_VERSION : supportedVersions;
    boolean supported = Version.check(serverVersion, thresholdVersions);
    if (!supported) {
      throw new UnsupportedOperationException(cmd + " not supported on server version=" +
          serverVersion + "; needs at least a server that matches or exceeds " +
          Arrays.toString(thresholdVersions));
    }
  }
}
 
Example #2
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testSetRegionState() throws IOException {
  TEST_UTIL.createTable(REGION_STATES_TABLE_NAME, Bytes.toBytes("family1"));
  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    List<RegionInfo> regions = admin.getRegions(REGION_STATES_TABLE_NAME);
    RegionInfo info = regions.get(0);
    assertEquals(RegionState.State.OPEN, getCurrentRegionState(info));
    String region = info.getEncodedName();
    try (ClusterConnection connection = this.hbck2.connect()) {
      this.hbck2.setRegionState(connection, region, RegionState.State.CLOSING);
    }
    assertEquals(RegionState.State.CLOSING, getCurrentRegionState(info));
  } finally {
    TEST_UTIL.deleteTable(REGION_STATES_TABLE_NAME);
  }
}
 
Example #3
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
private void unassigns(List<RegionInfo> regions, String[] regionStrsArray) throws IOException {
  try (ClusterConnection connection = this.hbck2.connect(); Hbck hbck = connection.getHbck()) {
    List<Long> pids = this.hbck2.unassigns(hbck, regionStrsArray);
    waitOnPids(pids);
  }
  for (RegionInfo ri : regions) {
    RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
            getRegionStates().getRegionState(ri.getEncodedName());
    LOG.info("RS: {}", rs.toString());
    assertTrue(rs.toString(), rs.isClosed());
  }
}
 
Example #4
Source File: RegionCoprocessorRpcChannel.java    From spliceengine with GNU Affero General Public License v3.0
RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) {
    this.table = table;
    this.row = row;
    this.conn = conn;
    this.operationTimeout = conn.getConnectionConfiguration().getOperationTimeout();
    this.rpcCallerFactory = conn.getRpcRetryingCallerFactory();
}
 
Example #5
Source File: MetaDataUtil.java    From phoenix with Apache License 2.0
/**
 * Checks whether all regions of a table are online.
 * @param conf configuration used to create the HBase connection.
 * @param table table whose regions are checked.
 * @return true when all regions of the table are online, false otherwise.
 */
public static boolean tableRegionsOnline(Configuration conf, PTable table) {
    try (ClusterConnection hcon =
            (ClusterConnection) ConnectionFactory.createConnection(conf)) {
        List<HRegionLocation> locations = hcon.locateRegions(
          org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes()));

        for (HRegionLocation loc : locations) {
            try {
                ServerName sn = loc.getServerName();
                if (sn == null) continue;

                AdminService.BlockingInterface admin = hcon.getAdmin(sn);
                HBaseRpcController controller = hcon.getRpcControllerFactory().newController();
                org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller,
                    admin, loc.getRegion().getRegionName());
            } catch (RemoteException e) {
                LOGGER.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
                return false;
            }
        }
    } catch (IOException ex) {
        LOGGER.warn("tableRegionsOnline failed due to:", ex);
        return false;
    }
    return true;
}
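
A caller would typically poll this helper until it reports the table fully online before proceeding. The sketch below shows such a retry loop; the waitForTableOnline name, retry count, and sleep interval are illustrative assumptions, not Phoenix code.

// Hedged sketch: retry tableRegionsOnline() a bounded number of times before giving up.
// The retry count and sleep interval are arbitrary assumptions.
static void waitForTableOnline(Configuration conf, PTable table)
        throws IOException, InterruptedException {
    for (int attempt = 0; attempt < 10; attempt++) {
        if (MetaDataUtil.tableRegionsOnline(conf, table)) {
            return;
        }
        Thread.sleep(1000L);
    }
    throw new IOException("Regions of " + table.getPhysicalName() + " still offline after retries");
}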
 
Example #6
Source File: HBaseFsckRepair.java    From hbase-operator-tools with Apache License 2.0
/**
 * Contacts a region server and waits up to hbase.hbck.close.timeout ms (default 120s) to close
 * the region. This bypasses the active HMaster.
 */
public static void closeRegionSilentlyAndWait(Connection connection, ServerName server,
    RegionInfo region) throws IOException, InterruptedException {
  long timeout = connection.getConfiguration()
      .getLong("hbase.hbck.close.timeout", 120000);
  ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
      region, timeout);
}
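
Calling this helper requires the ServerName currently hosting the region and its RegionInfo, both of which can be read from a RegionLocator. The sketch below shows one way to obtain them; the conf variable, table name, and row key are assumptions.

// Hedged sketch: locate the server hosting the region containing a given row, then
// ask HBaseFsckRepair to close the region there, bypassing the master.
try (Connection connection = ConnectionFactory.createConnection(conf);
    RegionLocator locator = connection.getRegionLocator(TableName.valueOf("my_table"))) {
  HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("row-key"), true);
  HBaseFsckRepair.closeRegionSilentlyAndWait(connection,
      location.getServerName(), location.getRegion());
}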
 
Example #7
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test (expected = IllegalArgumentException.class)
public void testSetRegionStateInvalidState() throws IOException {
  TEST_UTIL.createTable(REGION_STATES_TABLE_NAME, Bytes.toBytes("family1"));
  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    List<RegionInfo> regions = admin.getRegions(REGION_STATES_TABLE_NAME);
    RegionInfo info = regions.get(0);
    assertEquals(RegionState.State.OPEN, getCurrentRegionState(info));
    String region = info.getEncodedName();
    try (ClusterConnection connection = this.hbck2.connect()) {
      this.hbck2.setRegionState(connection, region, null);
    }
  } finally {
    TEST_UTIL.deleteTable(REGION_STATES_TABLE_NAME);
  }
}
 
Example #8
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testSetRegionStateInvalidRegion() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect()) {
    assertEquals(HBCK2.EXIT_FAILURE, this.hbck2.setRegionState(connection, "NO_REGION",
        RegionState.State.CLOSING));
  }
}
 
Example #9
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testSetTableStateInMeta() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect(); Hbck hbck = connection.getHbck()) {
    TableState state = this.hbck2.setTableState(hbck, TABLE_NAME, TableState.State.DISABLED);
    assertTrue("Found=" + state.getState(), state.isEnabled());
    // Restore the state.
    state = this.hbck2.setTableState(hbck, TABLE_NAME, state.getState());
    assertTrue("Found=" + state.getState(), state.isDisabled());
  }
}
 
Example #10
Source File: TestSchedulingRecoveries.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testSchedulingSCPWithTwoGoodHosts() throws IOException {
  String sn1 = TEST_UTIL.getHBaseCluster().getRegionServer(0).toString();
  String sn2 = TEST_UTIL.getHBaseCluster().getRegionServer(1).toString();
  try (ClusterConnection connection = this.hbck2.connect(); Hbck hbck = connection.getHbck()) {
    List<Long> pids = this.hbck2.scheduleRecoveries(hbck, new String[]{sn1, sn2});
    assertEquals(2, pids.size());
    assertTrue(pids.get(0) > 0);
    assertTrue(pids.get(1) > 0);
  }
}
 
Example #11
Source File: HBCK2.java    From hbase-operator-tools with Apache License 2.0
/**
 * @return List of bypass results, or null if the command failed to run.
 */
private List<Boolean> bypass(String[] args) throws IOException {
  // Bypass supports three options: override, recursive, and lockWait.
  Options options = new Options();
  // See usage for 'help' on these options.
  Option override = Option.builder("o").longOpt("override").build();
  options.addOption(override);
  Option recursive = Option.builder("r").longOpt("recursive").build();
  options.addOption(recursive);
  Option wait = Option.builder("w").longOpt("lockWait").hasArg().type(Integer.class).build();
  options.addOption(wait);
  // Parse command-line.
  CommandLineParser parser = new DefaultParser();
  CommandLine commandLine;
  try {
    commandLine = parser.parse(options, args, false);
  } catch (ParseException e) {
    showErrorMessage(e.getMessage());
    return null;
  }
  long lockWait = DEFAULT_LOCK_WAIT;
  if (commandLine.hasOption(wait.getOpt())) {
    lockWait = Integer.parseInt(commandLine.getOptionValue(wait.getOpt()));
  }
  String[] pidStrs = commandLine.getArgs();
  if (pidStrs == null || pidStrs.length <= 0) {
    showErrorMessage("No pids supplied.");
    return null;
  }
  boolean overrideFlag = commandLine.hasOption(override.getOpt());
  boolean recursiveFlag = commandLine.hasOption(recursive.getOpt());
  List<Long> pids = Arrays.stream(pidStrs).map(Long::valueOf).collect(Collectors.toList());
  try (ClusterConnection connection = connect(); Hbck hbck = connection.getHbck()) {
    checkFunctionSupported(connection, BYPASS);
    return hbck.bypassProcedure(pids, lockWait, overrideFlag, recursiveFlag);
  }
}
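
Based only on the options built above, the expected argument array is some combination of --override, --recursive, and an optional --lockWait value, followed by one or more pids. A hedged call sketch (the lockWait amount and pid values are made up):

// Hedged sketch of invoking bypass(); the flag names come from the Option builders above,
// while the lockWait amount and pids are arbitrary examples.
List<Boolean> results = bypass(new String[] {"--override", "--recursive", "--lockWait", "30", "101", "102"});
if (results != null) {
  LOG.info("Bypass results: {}", results);
}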
 
Example #12
Source File: HBCK2.java    From hbase-operator-tools with Apache License 2.0
void checkFunctionSupported(ClusterConnection connection, String cmd) throws IOException {
  if (skipCheck) {
    LOG.info("Skipped {} command version check; 'skip' set", cmd);
    return;
  }
  List<Method> methods = Arrays.asList(connection.getHbck().getClass().getDeclaredMethods());
  List<String> finalCmds = FUNCTION_NAME_MAP.getOrDefault(cmd, Collections.singletonList(cmd));
  boolean supported = methods.stream().anyMatch(method ->  finalCmds.contains(method.getName()));
  if (!supported) {
    throw new UnsupportedOperationException("This HBase cluster does not support command: "
            + cmd);
  }
}
 
Example #13
Source File: HBCK2.java    From hbase-operator-tools with Apache License 2.0
int setRegionState(ClusterConnection connection, String region,
      RegionState.State newState)
    throws IOException {
  if (newState == null) {
    throw new IllegalArgumentException("State can't be null.");
  }
  RegionState.State currentState = null;
  Table table = connection.getTable(TableName.valueOf("hbase:meta"));
  RowFilter filter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(region));
  Scan scan = new Scan();
  scan.setFilter(filter);
  Result result = table.getScanner(scan).next();
  if (result != null) {
    byte[] currentStateValue = result.getValue(HConstants.CATALOG_FAMILY,
      HConstants.STATE_QUALIFIER);
    if (currentStateValue == null) {
      System.out.println("WARN: Region state info on meta was NULL");
    } else {
      currentState = RegionState.State.valueOf(
          org.apache.hadoop.hbase.util.Bytes.toString(currentStateValue));
    }
    Put put = new Put(result.getRow());
    put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
      org.apache.hadoop.hbase.util.Bytes.toBytes(newState.name()));
    table.put(put);
    System.out.println("Changed region " + region + " STATE from "
      + currentState + " to " + newState);
    return EXIT_SUCCESS;
  } else {
    System.out.println("ERROR: Could not find region " + region + " in meta.");
  }
  return EXIT_FAILURE;
}
 
Example #14
Source File: BaseHRegionServer.java    From spliceengine with GNU Affero General Public License v3.0
public ClusterConnection getClusterConnection() {
    throw new UnsupportedOperationException("Not implemented");
}
 
Example #15
Source File: BaseHRegionServer.java    From spliceengine with GNU Affero General Public License v3.0
public ClusterConnection getConnection() {
    throw new UnsupportedOperationException("Not implemented");
}
 
Example #16
Source File: HBaseRpcChannelFactory.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public CoprocessorRpcChannel newRetryableChannel(TableName tableName,byte[] regionKey) throws IOException{
    Connection conn=HBaseConnectionFactory.getInstance(config).getConnection();
    return new RegionCoprocessorRpcChannel((ClusterConnection)conn,tableName,regionKey);
}
 
Example #17
Source File: HBaseRpcChannelFactory.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public CoprocessorRpcChannel newChannel(TableName tableName,byte[] regionKey) throws IOException{
    Connection conn=HBaseConnectionFactory.getInstance(config).getNoRetryConnection();
    return new RegionCoprocessorRpcChannel((ClusterConnection)conn,tableName,regionKey);
}
 
Example #18
Source File: HBasePartitionCache.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void invalidateAdapter(TableName tableName) throws IOException {
    partitionAdapterCache.invalidate(tableName);
    ((ClusterConnection) HBaseConnectionFactory.getInstance(config).getConnection()).clearRegionCache(tableName);
    ((ClusterConnection) HBaseConnectionFactory.getInstance(config).getNoRetryConnection()).clearRegionCache(tableName);
}
 
Example #19
Source File: HBasePartitionCache.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void invalidate(TableName tableName) throws IOException{
    partitionCache.invalidate(tableName);
    ((ClusterConnection) HBaseConnectionFactory.getInstance(config).getConnection()).clearRegionCache(tableName);
    ((ClusterConnection) HBaseConnectionFactory.getInstance(config).getNoRetryConnection()).clearRegionCache(tableName);
}
 
Example #20
Source File: AggregateQueryIT.java    From phoenix with Apache License 2.0
@Test
public void testSplitWithCachedMeta() throws Exception {
    // Tests that an aggregate query still returns correct results after the table is split
    // and the client's cached region locations become stale.
    String query = "SELECT a_string, b_string, count(1) FROM " + tableName + " WHERE organization_id=? and entity_id<=? GROUP BY a_string,b_string";
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    Admin admin = null;
    try {
        PreparedStatement statement = conn.prepareStatement(query);
        statement.setString(1, tenantId);
        statement.setString(2, ROW4);
        ResultSet rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(B_VALUE, rs.getString(2));
        assertEquals(2, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(C_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(E_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertFalse(rs.next());
        
        TableName tn = TableName.valueOf(tableName);
        admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
        org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(configuration);
        ((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
        RegionLocator regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(tableName));
        int nRegions = regionLocator.getAllRegionLocations().size();
        admin.split(tn, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
        int retryCount = 0;
        do {
            Thread.sleep(2000);
            retryCount++;
            //htable.clearRegionCache();
        } while (retryCount < 10 && regionLocator.getAllRegionLocations().size() == nRegions);
        assertNotEquals(nRegions, regionLocator.getAllRegionLocations().size());
        
        statement.setString(1, tenantId);
        rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(B_VALUE, rs.getString(2));
        assertEquals(2, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(C_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(E_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertFalse(rs.next());
    } finally {
        if (admin != null) {
            admin.close();
        }
        conn.close();
    }
}
 
Example #21
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test (expected = UnsupportedOperationException.class)
public void testFunctionNotSupported() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect()) {
    this.hbck2.checkFunctionSupported(connection, "test");
  }
}
 
Example #22
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testFunctionSupported() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect()) {
    this.hbck2.checkFunctionSupported(connection, "scheduleRecoveries");
  }
}
 
Example #23
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test (expected = IllegalArgumentException.class)
public void testSetRegionStateInvalidRegionAndInvalidState() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect()) {
    this.hbck2.setRegionState(connection, "NO_REGION", null);
  }
}
 
Example #24
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test (expected = UnsupportedOperationException.class)
public void testVersions() throws IOException {
  try (ClusterConnection connection = this.hbck2.connect()) {
    this.hbck2.checkHBCKSupport(connection, "test", "10.0.0");
  }
}
 
Example #25
Source File: TestHBCK2.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testAssigns() throws IOException {
  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
    for (RegionInfo ri: regions) {
      RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
          getRegionStates().getRegionState(ri.getEncodedName());
      LOG.info("RS: {}", rs.toString());
    }
    String[] regionStrsArray =
        regions.stream().map(RegionInfo::getEncodedName).toArray(String[]::new);

    try (ClusterConnection connection = this.hbck2.connect(); Hbck hbck = connection.getHbck()) {
      unassigns(regions, regionStrsArray);
      List<Long> pids = this.hbck2.assigns(hbck, regionStrsArray);
      waitOnPids(pids);
      validateOpen(regions);
      // What happens if crappy region list passed?
      pids = this.hbck2.assigns(hbck, Arrays.stream(new String[]{"a", "some rubbish name"}).
              collect(Collectors.toList()).toArray(new String[]{}));
      for (long pid : pids) {
        assertEquals(org.apache.hadoop.hbase.procedure2.Procedure.NO_PROC_ID, pid);
      }

      // test input files
      unassigns(regions, regionStrsArray);
      File testFile = new File(TEST_UTIL.getDataTestDir().toString(), "inputForAssignsTest");
      try (FileOutputStream output = new FileOutputStream(testFile, false)) {
        for (String regionStr : regionStrsArray) {
          output.write((regionStr + System.lineSeparator()).getBytes());
        }
      }
      String result = testRunWithArgs(new String[]{ASSIGNS, "-i", testFile.toString()});
      Scanner scanner = new Scanner(result).useDelimiter("[\\D]+");
      pids = new ArrayList<>();
      while (scanner.hasNext()) {
        pids.add(scanner.nextLong());
      }
      scanner.close();
      waitOnPids(pids);
      validateOpen(regions);
    }
  }
}
 
Example #26
Source File: HBCK2.java    From hbase-operator-tools with Apache License 2.0
/**
 * Create a connection.
 * Needs to be called before any operation that goes against the remote server.
 * Be sure to close the returned connection when done.
 */
ClusterConnection connect() throws IOException {
  return (ClusterConnection)ConnectionFactory.createConnection(getConf());
}
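
Putting the pieces together, a hedged sketch of the pattern the examples above use with connect(): open the connection and Hbck handle in try-with-resources, verify the remote cluster supports the command, then perform the operation. The command name is simply the one used in the tests above.

// Hedged sketch: open, check support, operate; try-with-resources closes everything.
try (ClusterConnection connection = connect(); Hbck hbck = connection.getHbck()) {
  checkHBCKSupport(connection, "scheduleRecoveries");
  // ... invoke Hbck operations here ...
}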