Java Code Examples for org.apache.hadoop.hbase.util.FSUtils#getRegionDirFromRootDir()

The following examples show how to use org.apache.hadoop.hbase.util.FSUtils#getRegionDirFromRootDir(). The examples are taken from the Apache HBase project; the source file each one comes from is noted above its code.
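
Before the examples, a minimal sketch of the call itself may help. It is not taken from the examples below: the helper name regionDirExists is hypothetical, and conf is assumed to point at a configured HBase deployment. Given the HBase root directory and a RegionInfo, the method resolves that region's directory under the root.

// Hypothetical helper, not part of HBase: resolve a region's directory under the
// root directory and check whether it exists on the backing file system.
static boolean regionDirExists(Configuration conf, RegionInfo regionInfo) throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf);
  // Typically resolves to <rootDir>/data/<namespace>/<table>/<encoded-region-name>
  Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, regionInfo);
  FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
  return rootFs.exists(regionDir);
}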
Example 1
Source File: WALSplitUtil.java    From hbase with Apache License 2.0
/**
 * This method will check 3 places for finding the max sequence id file. One is the expected
 * place, another is the old place under the region directory, and the last one is the wrong one
 * we introduced in HBASE-20734. See HBASE-22617 for more details.
 * <p/>
 * Note that you should always call this method instead of
 * {@link #getMaxRegionSequenceId(FileSystem, Path)} until the 4.0.0 release.
 * @deprecated Only for compatibility, will be removed in 4.0.0.
 */
@Deprecated
public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region,
  IOExceptionSupplier<FileSystem> rootFsSupplier, IOExceptionSupplier<FileSystem> walFsSupplier)
  throws IOException {
  FileSystem rootFs = rootFsSupplier.get();
  FileSystem walFs = walFsSupplier.get();
  Path regionWALDir =
    CommonFSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName());
  // This is the old place where we store max sequence id file
  Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), region);
  // This is for HBASE-20734, where we use a wrong directory, see HBASE-22617 for more details.
  Path wrongRegionWALDir =
    CommonFSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName());
  long maxSeqId = getMaxRegionSequenceId(walFs, regionWALDir);
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(rootFs, regionDir));
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(walFs, wrongRegionWALDir));
  return maxSeqId;
}
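
A hypothetical sketch of a call site for this compatibility overload, using the CommonFSUtils accessors that appear in the other examples; conf and region are assumed to be in scope, and the lambdas satisfy IOExceptionSupplier<FileSystem>.

// Hypothetical call site: supply the root and WAL file systems; the method then
// checks the WAL region dir, the old root-dir location, and the HBASE-20734 dir.
long maxSeqId = WALSplitUtil.getMaxRegionSequenceId(conf, region,
  () -> CommonFSUtils.getRootDirFileSystem(conf),
  () -> CommonFSUtils.getWALFileSystem(conf));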
 
Example 2
Source File: WALSplitUtil.java    From hbase with Apache License 2.0
/**
 * Check whether recovered.edits exist in the region dir
 * @param conf conf
 * @param regionInfo the region to check
 * @return true if recovered.edits exist in the region dir
 */
public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo)
    throws IOException {
  // No recovered.edits for non-default replica regions
  if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
    return false;
  }
  // Only default replica region can reach here, so we can use regioninfo
  // directly without converting it to default replica's regioninfo.
  Path regionWALDir =
    CommonFSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
  Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), regionInfo);
  Path wrongRegionWALDir =
    CommonFSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
  FileSystem walFs = CommonFSUtils.getWALFileSystem(conf);
  FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
  NavigableSet<Path> files = getSplitEditFilesSorted(walFs, regionWALDir);
  if (!files.isEmpty()) {
    return true;
  }
  files = getSplitEditFilesSorted(rootFs, regionDir);
  if (!files.isEmpty()) {
    return true;
  }
  files = getSplitEditFilesSorted(walFs, wrongRegionWALDir);
  return !files.isEmpty();
}
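
As a hypothetical call site, the check might simply gate a log message or a recovery decision; conf, regionInfo, and an slf4j LOG are assumed to be in scope.

// Hypothetical usage: non-default replicas always report false, so only the
// default replica's RegionInfo needs to be passed in.
if (WALSplitUtil.hasRecoveredEdits(conf, regionInfo)) {
  LOG.info("Region {} still has recovered.edits to replay", regionInfo.getEncodedName());
}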
 
Example 3
Source File: HFileArchiver.java    From hbase with Apache License 2.0
/**
 * @return True if the Region exists in the filesystem.
 */
public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info)
    throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf);
  Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info);
  return fs.exists(regionDir);
}
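
A minimal sketch of calling this check, assuming the FileSystem passed in is the one backing the root directory and that conf and regionInfo are in scope:

// Hypothetical usage: probe whether the region still has an on-disk directory.
FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
boolean onDisk = HFileArchiver.exists(conf, rootFs, regionInfo);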
 
Example 4
Source File: TestIgnoreUnknownFamily.java    From hbase with Apache License 2.0
private void addStoreFileToKnownFamily(RegionInfo region) throws IOException {
  MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path regionDir =
    FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(mfs.getConfiguration()), region);
  Path familyDir = new Path(regionDir, Bytes.toString(UNKNOWN_FAMILY));
  StoreFileWriter writer =
      new StoreFileWriter.Builder(mfs.getConfiguration(), mfs.getFileSystem())
          .withOutputDir(familyDir).withFileContext(new HFileContextBuilder().build()).build();
  writer.close();
}
 
Example 5
Source File: MasterFileSystem.java    From hbase with Apache License 2.0
/**
 * @return the directory for a given {@code region}.
 */
public Path getRegionDir(RegionInfo region) {
  return FSUtils.getRegionDirFromRootDir(getRootDir(), region);
}
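
A brief, hypothetical sketch of using this accessor from master-side or test code (compare Example 4); master is assumed to be an HMaster reference and regionInfo a RegionInfo in scope.

// Hypothetical usage: the MasterFileSystem already knows the root dir, so the
// caller only supplies the RegionInfo.
MasterFileSystem mfs = master.getMasterFileSystem();
Path regionDir = mfs.getRegionDir(regionInfo);
boolean present = mfs.getFileSystem().exists(regionDir);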
 
Example 6
Source File: TestHFileArchiving.java    From hbase with Apache License 2.0
@Test
public void testRemoveRegionDirOnArchive() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  UTIL.createTable(tableName, TEST_FAM);

  final Admin admin = UTIL.getAdmin();

  // get the current store files for the region
  List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
  // make sure we only have 1 region serving this table
  assertEquals(1, servingRegions.size());
  HRegion region = servingRegions.get(0);

  // and load the table
  UTIL.loadRegion(region, TEST_FAM);

  // shutdown the table so we can manipulate the files
  admin.disableTable(tableName);

  FileSystem fs = UTIL.getTestFileSystem();

  // now attempt to depose the region
  Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
  Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());

  HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

  // check for the existence of the archive directory and some files in it
  Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
  assertTrue(fs.exists(archiveDir));

  // check to make sure the store directory was copied
  FileStatus[] stores = fs.listStatus(archiveDir, new PathFilter() {
    @Override
    public boolean accept(Path p) {
      if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
        return false;
      }
      return true;
    }
  });
  assertTrue(stores.length == 1);

  // make sure we archived the store files
  FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
  assertTrue(storeFiles.length > 0);

  // then ensure the region's directory isn't present
  assertFalse(fs.exists(regionDir));

  UTIL.deleteTable(tableName);
}
 
Example 7
Source File: TestRecoveredEdits.java    From hbase with Apache License 2.0
private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy) throws
  IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Set it so we flush every 1M or so. That's a lot.
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy).toLowerCase());
  // The file of recovered edits has a column family of 'meta'.
  final String columnFamily = "meta";
  byte[][] columnFamilyAsByteArray = new byte[][] { Bytes.toBytes(columnFamily) };
  TableDescriptor tableDescriptor = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(testName.getMethodName())).setColumnFamily(
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)).build())
    .build();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  final String encodedRegionName = hri.getEncodedName();
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableDescriptor.getTableName());
  HRegionFileSystem hrfs =
      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
  if (fs.exists(hrfs.getRegionDir())) {
    LOG.info("Region directory already exists. Deleting.");
    fs.delete(hrfs.getRegionDir(), true);
  }
  HRegion region = HBaseTestingUtility
      .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // There should be no store files.
  assertTrue(storeFiles.isEmpty());
  region.close();
  Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri);
  Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir);
  // This is a little fragile getting this path to a file of 10M of edits.
  Path recoveredEditsFile = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
      "0000000000000016310");
  // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
  Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
  fs.copyToLocalFile(recoveredEditsFile, destination);
  assertTrue(fs.exists(destination));
  // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
  region = HRegion.openHRegion(region, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Let's assume that if
  // we flush at 1MB, that there are at least 3 flushed files that are there because of the
  // replay of edits.
  if (policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) {
    assertTrue("Files count=" + storeFiles.size(), storeFiles.size() >= 1);
  } else {
    assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
  }
  // Now verify all edits made it into the region.
  int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
  LOG.info("Checked " + count + " edits made it in");
}