Java Code Examples for org.apache.hadoop.hbase.client.RegionInfo#getEndKey()

The following examples show how to use org.apache.hadoop.hbase.client.RegionInfo#getEndKey(). The source project and license are noted above each example.
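
Before the project examples, a minimal sketch of the method's basic contract may help: getEndKey() returns the exclusive upper bound of a region's row range, and the last region of a table returns an empty byte[] (HConstants.EMPTY_END_ROW). The helper below is illustrative only (the class and method names are not part of HBase); it looks up the region for a row and prints its key range.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class EndKeyDemo {
  // 'connection' is assumed to be an open HBase Connection supplied by the caller.
  static void printRegionRange(Connection connection, TableName table, byte[] row)
      throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(table)) {
      HRegionLocation location = locator.getRegionLocation(row);
      RegionInfo region = location.getRegion();
      byte[] endKey = region.getEndKey();
      // An empty end key marks the last region of the table (no upper bound).
      String end = endKey.length == 0 ? "<end of table>" : Bytes.toStringBinary(endKey);
      System.out.println("row " + Bytes.toStringBinary(row) + " is in region ["
          + Bytes.toStringBinary(region.getStartKey()) + ", " + end + ")");
    }
  }
}
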
Example 1
Source File: Export.java    From hbase with Apache License 2.0
private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request)
    throws IOException {
  Scan scan = ProtobufUtil.toScan(request.getScan());
  byte[] regionStartKey = region.getStartKey();
  byte[] originStartKey = scan.getStartRow();
  if (originStartKey == null
          || Bytes.compareTo(originStartKey, regionStartKey) < 0) {
    scan.withStartRow(regionStartKey);
  }
  byte[] regionEndKey = region.getEndKey();
  byte[] originEndKey = scan.getStopRow();
  if (originEndKey == null
          || Bytes.compareTo(originEndKey, regionEndKey) > 0) {
    scan.withStopRow(regionEndKey);
  }
  return scan;
}
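
The method above clamps a client-supplied scan to the region's [startKey, endKey) range. A hedged variant of the same idea is sketched below (clampToRegion is an illustrative name, not HBase API); it treats an empty region end key, which only the last region of a table has, as "no upper bound" and therefore never tightens the stop row against it.

static Scan clampToRegion(Scan scan, RegionInfo region) {
  byte[] start = scan.getStartRow();
  if (start.length == 0 || Bytes.compareTo(start, region.getStartKey()) < 0) {
    scan.withStartRow(region.getStartKey());
  }
  byte[] regionEnd = region.getEndKey();
  byte[] stop = scan.getStopRow();
  // An empty region end key means "last region"; only tighten the stop row
  // when the region actually has an upper bound.
  if (regionEnd.length > 0 && (stop.length == 0 || Bytes.compareTo(stop, regionEnd) > 0)) {
    scan.withStopRow(regionEnd);
  }
  return scan;
}
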
 
Example 2
Source File: TestRegionSplitter.java    From hbase with Apache License 2.0
private void verifyBounds(List<byte[]> expectedBounds, TableName tableName)
        throws Exception {
  // Get region boundaries from the cluster and verify their endpoints
  final int numRegions = expectedBounds.size()-1;
  try (Table table = UTIL.getConnection().getTable(tableName);
      RegionLocator locator = UTIL.getConnection().getRegionLocator(tableName)) {
    final List<HRegionLocation> regionInfoMap = locator.getAllRegionLocations();
    assertEquals(numRegions, regionInfoMap.size());
    for (HRegionLocation entry : regionInfoMap) {
      final RegionInfo regionInfo = entry.getRegion();
      byte[] regionStart = regionInfo.getStartKey();
      byte[] regionEnd = regionInfo.getEndKey();

      // This region's start key should be one of the region boundaries
      int startBoundaryIndex = indexOfBytes(expectedBounds, regionStart);
      assertNotSame(-1, startBoundaryIndex);

      // This region's end key should be the region boundary that comes
      // after the starting boundary.
      byte[] expectedRegionEnd = expectedBounds.get(startBoundaryIndex + 1);
      assertEquals(0, Bytes.compareTo(regionEnd, expectedRegionEnd));
    }
  }
}
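
The indexOfBytes(...) helper is defined elsewhere in TestRegionSplitter and is not shown above. A plausible sketch, assuming it is a simple linear search using byte-wise comparison, would be:

static int indexOfBytes(List<byte[]> list, byte[] key) {
  for (int i = 0; i < list.size(); i++) {
    if (Bytes.compareTo(list.get(i), key) == 0) {
      return i;
    }
  }
  return -1;  // key is not one of the expected boundaries
}
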
 
Example 3
Source File: TestEndToEndSplitTransaction.java    From hbase with Apache License 2.0
void verifyTableRegions(Set<RegionInfo> regions) {
  log("Verifying " + regions.size() + " regions: " + regions);

  byte[][] startKeys = new byte[regions.size()][];
  byte[][] endKeys = new byte[regions.size()][];

  int i = 0;
  for (RegionInfo region : regions) {
    startKeys[i] = region.getStartKey();
    endKeys[i] = region.getEndKey();
    i++;
  }

  Pair<byte[][], byte[][]> keys = new Pair<>(startKeys, endKeys);
  verifyStartEndKeys(keys);
}
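
verifyStartEndKeys(...) is defined elsewhere in the test. As a hedged sketch of what such a check typically asserts (assuming the keys arrive sorted in region order): the first start key and the last end key are empty, and each region's end key equals the next region's start key, so the regions tile the table with no gaps or overlaps.

static void verifyContiguousKeys(byte[][] startKeys, byte[][] endKeys) {
  assertEquals(startKeys.length, endKeys.length);
  assertEquals(0, startKeys[0].length);                 // first region is open at the start
  assertEquals(0, endKeys[endKeys.length - 1].length);  // last region is open at the end
  for (int i = 0; i < startKeys.length - 1; i++) {
    // each region's exclusive end key is the next region's start key
    assertTrue(Bytes.equals(endKeys[i], startKeys[i + 1]));
  }
}
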
 
Example 4
Source File: MergeTableRegionsProcedure.java    From hbase with Apache License 2.0
/**
 * Create merged region info by looking at passed in <code>regionsToMerge</code>
 * to figure what extremes for start and end keys to use; merged region needs
 * to have an extent sufficient to cover all regions-to-merge.
 */
private static RegionInfo createMergedRegionInfo(final RegionInfo[] regionsToMerge) {
  byte [] lowestStartKey = null;
  byte [] highestEndKey = null;
  // Region Id is a timestamp. Merged region's id can't be less than that of
  // merging regions else will insert at wrong location in hbase:meta (See HBASE-710).
  long highestRegionId = -1;
  for (RegionInfo ri: regionsToMerge) {
    if (lowestStartKey == null) {
      lowestStartKey = ri.getStartKey();
    } else if (Bytes.compareTo(ri.getStartKey(), lowestStartKey) < 0) {
      lowestStartKey = ri.getStartKey();
    }
    if (highestEndKey == null) {
      highestEndKey = ri.getEndKey();
    } else if (ri.isLast() || Bytes.compareTo(ri.getEndKey(), highestEndKey) > 0) {
      highestEndKey = ri.getEndKey();
    }
    highestRegionId = ri.getRegionId() > highestRegionId? ri.getRegionId(): highestRegionId;
  }
  // Merged region is sorted between two merging regions in META
  return RegionInfoBuilder.newBuilder(regionsToMerge[0].getTable()).
      setStartKey(lowestStartKey).
      setEndKey(highestEndKey).
      setSplit(false).
      setRegionId(highestRegionId + 1/*Add one so new merged region is highest*/).
      build();
}
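
A short note on the ri.isLast() branch above: the last region's end key is the empty byte array, which sorts before any non-empty key under Bytes.compareTo, so a plain maximum over end keys would wrongly discard the open-ended region. The small illustration below (class name is illustrative) prints a negative number for exactly that reason.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class LastRegionEndKeyNote {
  public static void main(String[] args) {
    byte[] lastRegionEnd = HConstants.EMPTY_END_ROW;  // empty: "no upper bound"
    byte[] ordinaryEnd = Bytes.toBytes("m");
    // Negative: the empty end key compares lower than any real key, even though
    // it logically covers everything after it.
    System.out.println(Bytes.compareTo(lastRegionEnd, ordinaryEnd));
  }
}
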
 
Example 5
Source File: TestTableResource.java    From hbase with Apache License 2.0
void checkTableInfo(TableInfoModel model) {
  assertEquals(model.getName(), TABLE.getNameAsString());
  Iterator<TableRegionModel> regions = model.getRegions().iterator();
  assertTrue(regions.hasNext());
  while (regions.hasNext()) {
    TableRegionModel region = regions.next();
    boolean found = false;
    LOG.debug("looking for region " + region.getName());
    for (HRegionLocation e: regionMap) {
      RegionInfo hri = e.getRegion();
      // getRegionNameAsString uses Bytes.toStringBinary which escapes some non-printable
      // characters
      String hriRegionName = Bytes.toString(hri.getRegionName());
      String regionName = region.getName();
      LOG.debug("comparing to region " + hriRegionName);
      if (hriRegionName.equals(regionName)) {
        found = true;
        byte[] startKey = hri.getStartKey();
        byte[] endKey = hri.getEndKey();
        ServerName serverName = e.getServerName();
        InetSocketAddress sa =
            new InetSocketAddress(serverName.getHostname(), serverName.getPort());
        String location = sa.getHostName() + ":" +
          Integer.valueOf(sa.getPort());
        assertEquals(hri.getRegionId(), region.getId());
        assertTrue(Bytes.equals(startKey, region.getStartKey()));
        assertTrue(Bytes.equals(endKey, region.getEndKey()));
        assertEquals(location, region.getLocation());
        break;
      }
    }
    assertTrue("Couldn't find region " + region.getName(), found);
  }
}
 
Example 6
Source File: HBaseFsck.java    From hbase with Apache License 2.0
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<RegionInfo> regions = MetaTableAccessor.getAllRegions(connection, true);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = CommonFSUtils.getRootDir(getConf());
    for (RegionInfo regionInfo : regions) {
      Path tableDir = CommonFSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(),
              CacheConfig.DISABLED, true, getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) {
              storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) {
              storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;

      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.

      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not aligned between stores and META for:");
        LOG.warn(Objects.toString(currentRegionBoundariesInformation));
      }
    }
  } catch (IOException e) {
    LOG.error(e.toString(), e);
  }
}
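
The boundary rule described in the comment inside the method above can be summed up in one hedged helper (the name withinMetaBounds is illustrative): a region is consistent when its smallest store row is >= the META start key and its largest store row is < the exclusive META end key, where null on either side means "unbounded", which is why the code converts empty META keys to null before comparing.

static boolean withinMetaBounds(byte[] storeFirstKey, byte[] storeLastKey,
    byte[] metaFirstKey, byte[] metaLastKey) {
  // null means "unbounded" on that side (first region start / last region end,
  // or a region with no store files at all).
  boolean startOk = metaFirstKey == null || storeFirstKey == null
      || Bytes.compareTo(storeFirstKey, metaFirstKey) >= 0;
  boolean endOk = metaLastKey == null || storeLastKey == null
      || Bytes.compareTo(storeLastKey, metaLastKey) < 0;
  return startOk && endOk;
}
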
 
Example 7
Source File: RegionInfoDisplay.java    From hbase with Apache License 2.0
/**
 * Get the end key for display. Optionally hide the real end key.
 * @return the end key
 */
public static byte[] getEndKeyForDisplay(RegionInfo ri, Configuration conf) {
  boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
  if (displayKey) return ri.getEndKey();
  return HIDDEN_END_KEY;
}
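
A hypothetical caller (the method below and its regionInfo argument are assumptions, not part of the source above): when hbase.display.keys is set to false, the helper returns the HIDDEN_END_KEY placeholder instead of the real end key, which lets UI and log code avoid leaking row keys.

static byte[] endKeyForLogs(RegionInfo regionInfo) {
  Configuration conf = HBaseConfiguration.create();
  // RegionInfoDisplay.DISPLAY_KEYS_KEY defaults to true, as seen above.
  conf.setBoolean(RegionInfoDisplay.DISPLAY_KEYS_KEY, false);
  return RegionInfoDisplay.getEndKeyForDisplay(regionInfo, conf);
}
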
 
Example 8
Source File: BaseResultIterators.java    From phoenix with Apache License 2.0
/**
 * Get parallel scans of the specified scan boundaries. This can be used for getting parallel
 * scans when there is split/merges while scanning a chunk. In this case we need not go by all
 * the regions or guideposts.
 * @param scan the scan whose overall key range is being split
 * @return lists of scans that can be executed in parallel
 * @throws SQLException if the region boundaries cannot be retrieved
 */
private List<List<Scan>> getParallelScans(Scan scan) throws SQLException {
    List<HRegionLocation> regionLocations = getRegionBoundaries(scanGrouper);
    List<byte[]> regionBoundaries = toBoundaries(regionLocations);
    int regionIndex = 0;
    int stopIndex = regionBoundaries.size();
    if (scan.getStartRow().length > 0) {
        regionIndex = getIndexContainingInclusive(regionBoundaries, scan.getStartRow());
    }
    if (scan.getStopRow().length > 0) {
        stopIndex = Math.min(stopIndex, regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), scan.getStopRow()));
    }
    List<List<Scan>> parallelScans = Lists.newArrayListWithExpectedSize(stopIndex - regionIndex + 1);
    List<Scan> scans = Lists.newArrayListWithExpectedSize(2);
    while (regionIndex <= stopIndex) {
        HRegionLocation regionLocation = regionLocations.get(regionIndex);
        RegionInfo regionInfo = regionLocation.getRegion();
        Scan newScan = ScanUtil.newScan(scan);
        byte[] endKey;
        if (regionIndex == stopIndex) {
            endKey = scan.getStopRow();
        } else {
            endKey = regionBoundaries.get(regionIndex);
        }
        if(ScanUtil.isLocalIndex(scan)) {
            ScanUtil.setLocalIndexAttributes(newScan, 0, regionInfo.getStartKey(),
                regionInfo.getEndKey(), newScan.getAttribute(SCAN_START_ROW_SUFFIX),
                newScan.getAttribute(SCAN_STOP_ROW_SUFFIX));
        } else {
            if(Bytes.compareTo(scan.getStartRow(), regionInfo.getStartKey())<=0) {
                newScan.setAttribute(SCAN_ACTUAL_START_ROW, regionInfo.getStartKey());
                newScan.setStartRow(regionInfo.getStartKey());
            }
            if(scan.getStopRow().length == 0 || (regionInfo.getEndKey().length != 0 && Bytes.compareTo(scan.getStopRow(), regionInfo.getEndKey())>0)) {
                newScan.setStopRow(regionInfo.getEndKey());
            }
        }
        scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation);
        regionIndex++;
    }
    if (!scans.isEmpty()) { // Add any remaining scans
        parallelScans.add(scans);
    }
    return parallelScans;
}
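
The toBoundaries(...) helper used above is not reproduced in this example. A plausible sketch of the idea, assuming it simply collects each region's end key as an interior split point (the last region's empty end key is not a boundary):

static List<byte[]> toRegionBoundaries(List<HRegionLocation> regionLocations) {
  List<byte[]> boundaries = new ArrayList<>(regionLocations.size());
  for (HRegionLocation location : regionLocations) {
    byte[] endKey = location.getRegion().getEndKey();
    if (endKey.length > 0) {  // skip the last region's empty end key
      boundaries.add(endKey);
    }
  }
  return boundaries;
}
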
 
Example 9
Source File: BackupEndpointObserver.java    From spliceengine with GNU Affero General Public License v3.0
public SpliceMessage.PrepareBackupResponse.Builder prepare(SpliceMessage.PrepareBackupRequest request) throws Exception{

        SpliceMessage.PrepareBackupResponse.Builder responseBuilder = SpliceMessage.PrepareBackupResponse.newBuilder();
        responseBuilder.setReadyForBackup(false);

        if (!BackupUtils.regionKeysMatch(request, region)) {
            // if the start/end key of the request does not match this region, return false to the client, because
            // region has been split. The client should retry.
            SpliceLogUtils.info(LOG, "preparing backup for table %s region %s", tableName, regionName);
            SpliceLogUtils.info(LOG, "Region keys do not match with keys in the request");
            return responseBuilder;
        }

        boolean canceled = false;

        long backupId = request.getBackupId();
        String backupJobPath = BackupUtils.getBackupPath() + "/" + backupId;
        String regionBackupPath = backupJobPath + "/" + tableName + "/" + regionName;
        if (isSplitting.get() || isCompacting.get()) {
            SpliceLogUtils.info(LOG, "table %s region %s is not ready for backup: isSplitting=%s, isCompacting=%s",
                    tableName , regionName, isSplitting.get(), isCompacting.get());

            // return false to client if the region is being split
            responseBuilder.setReadyForBackup(false);
        } else {
            if (LOG.isDebugEnabled()) {
                SpliceLogUtils.debug(LOG, "%s:%s waits for flush and compaction to complete", tableName, regionName);
            }

            // A region might have been in backup. This is unlikely to happen unless the previous response was lost
            // and the client is retrying
            if (!BackupUtils.regionIsBeingBackup(tableName, regionName, backupJobPath, regionBackupPath)) {
                // Flush memstore and Wait for flush and compaction to be done
                region.flushcache(false,false, null);
                region.waitForFlushesAndCompactions();

                canceled = BackupUtils.backupCanceled();
                if (!canceled) {
                    // Create a ZNode to indicate that the region is being copied
                    RegionInfo regionInfo = region.getRegionInfo();
                    BackupRegionStatus backupRegionStatus = new BackupRegionStatus(regionInfo.getStartKey(), regionInfo.getEndKey(),
                            HConfiguration.BACKUP_IN_PROGRESS);
                    boolean created = ZkUtils.recursiveSafeCreate(regionBackupPath, backupRegionStatus.toBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                    if (LOG.isDebugEnabled()) {
                        if (ZkUtils.getRecoverableZooKeeper().exists(regionBackupPath, false) != null) {
                            SpliceLogUtils.debug(LOG,"created znode %s to mark backup in progress, created = %s", regionBackupPath, created);
                        }else {
                            SpliceLogUtils.warn(LOG, "failed to create znode %s, created = %s", regionBackupPath, created);
                        }
                    }

                    if (isCompacting.get() || isSplitting.get()) {

                        SpliceLogUtils.info(LOG, "table %s region %s is not ready for backup: isSplitting=%s, isCompacting=%s",
                                tableName, regionName, isSplitting.get(), isCompacting.get());
                        SpliceLogUtils.info(LOG, "delete znode %d", regionBackupPath);

                        ZkUtils.recursiveDelete(regionBackupPath);
                    }
                    else {
                        responseBuilder.setReadyForBackup(true);
                        if (LOG.isDebugEnabled()) {
                            SpliceLogUtils.debug(LOG, "%s:%s is ready for backup", tableName, regionName);
                        }
                    }
                }
            }
            else
                responseBuilder.setReadyForBackup(true);
        }
        return responseBuilder;
    }
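
BackupUtils.regionKeysMatch(...) is not shown above. A hedged sketch of the idea, assuming the request carries the start and end keys the client last observed for this region: if either key differs from the region's current keys, the region has split or merged since the client looked it up, so the client should retry against the new regions.

static boolean regionKeysMatch(byte[] requestStartKey, byte[] requestEndKey, RegionInfo regionInfo) {
  return Bytes.equals(requestStartKey, regionInfo.getStartKey())
      && Bytes.equals(requestEndKey, regionInfo.getEndKey());
}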