org.apache.hadoop.hbase.util.FSUtils Java Examples

The following examples show how to use org.apache.hadoop.hbase.util.FSUtils. Each example notes the project and source file it was taken from.
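
Before diving in, here is a minimal, self-contained sketch of the helpers that recur throughout the examples. It assumes an HBase release in which these methods still live on FSUtils; several of them (getRootDir, getTableDir, listStatus) were later moved to CommonFSUtils, as some of the examples below reflect. The table name "demo" is illustrative.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class FSUtilsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The file system backing hbase.rootdir, and the root directory itself
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    // Resolve a table's directory and enumerate its region directories
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("demo"));
    List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
    System.out.println(tableDir + " contains " + regionDirs.size() + " region dir(s)");
  }
}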
Example #1
Source File: RestoreSnapshotHelper.java    From hbase with Apache License 2.0
/**
 * @return the set of the regions contained in the table
 */
private List<RegionInfo> getTableRegions() throws IOException {
  LOG.debug("get table regions: " + tableDir);
  FileStatus[] regionDirs =
    CommonFSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
  if (regionDirs == null) {
    return null;
  }

  List<RegionInfo> regions = new ArrayList<>(regionDirs.length);
  for (int i = 0; i < regionDirs.length; ++i) {
    RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
    regions.add(hri);
  }
  LOG.debug("found " + regions.size() + " regions for table=" +
    tableDesc.getTableName().getNameAsString());
  return regions;
}
 
Example #2
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1}, null);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example #3
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, tableName + ".hlog");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  WAL hLog = walFactory.getWAL(info);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example #4
Source File: TestTruncateTableProcedure.java    From hbase with Apache License 2.0
@Override
protected Flow executeFromState(MasterProcedureEnv env,
  MasterProcedureProtos.TruncateTableState state) throws InterruptedException {

  if (!failOnce &&
    state == MasterProcedureProtos.TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT) {
    try {
      // To emulate an HDFS failure, create only the first region directory
      RegionInfo regionInfo = getFirstRegionInfo();
      Configuration conf = env.getMasterConfiguration();
      MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      Path tempdir = mfs.getTempDir();
      Path tableDir = CommonFSUtils.getTableDir(tempdir, regionInfo.getTable());
      Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(regionDir);

      failOnce = true;
      return Flow.HAS_MORE_STATE;
    } catch (IOException e) {
      fail("failed to create a region directory: " + e);
    }
  }

  return super.executeFromState(env, state);
}
 
Example #5
Source File: BackupEndpointObserver.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void start(CoprocessorEnvironment e) throws IOException {
    try {
        region = (HRegion) ((RegionCoprocessorEnvironment) e).getRegion();
        String[] name = region.getTableDescriptor().getTableName().getNameAsString().split(":");
        if (name.length == 2) {
            namespace = name[0];
            tableName = name[1];
        }
        else {
            tableName = name[0];
        }
        regionName = region.getRegionInfo().getEncodedName();

        conf = HConfiguration.unwrapDelegate();
        rootDir = FSUtils.getRootDir(conf);
        fs = FSUtils.getCurrentFileSystem(conf);
        backupDir = new Path(rootDir, BackupRestoreConstants.BACKUP_DIR + "/data/splice/" + tableName + "/" + regionName);
        preparing = new AtomicBoolean(false);
        isCompacting = new AtomicBoolean(false);
        isSplitting = new AtomicBoolean(false);
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
Example #6
Source File: MasterProcedureTestingUtility.java    From hbase with Apache License 2.0
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
    final String family) throws IOException {
  // verify htd
  TableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue(htd != null);
  assertFalse(htd.hasColumnFamily(Bytes.toBytes(family)));

  // verify fs
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path tableDir =
    CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
  for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
    final Path familyDir = new Path(regionDir, family);
    assertFalse(family + " family dir should not exist", fs.exists(familyDir));
  }
}
 
Example #7
Source File: AbstractHoplog.java    From gemfirexd-oss with Apache License 2.0
private synchronized void initHoplogSizeTimeInfo() {
  if (hoplogSize != null && hoplogModificationTime != null) {
    // time and size info is already initialized. no work needed here
    return;
  }

  try {
    FileStatus[] filesInfo = FSUtils.listStatus(fsProvider.getFS(), path, null);
    if (filesInfo != null && filesInfo.length == 1) {
      this.hoplogModificationTime = filesInfo[0].getModificationTime();
      this.hoplogSize = filesInfo[0].getLen();
    }
    // TODO else condition may happen if user deletes hoplog from the file system.
  } catch (IOException e) {
    logger.error(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path, e);
    throw new HDFSIOException(
        LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
  }
}
 
Example #8
Source File: TestHbckChore.java    From hbase with Apache License 2.0
@Test
public void testOrphanRegionsOnFS() throws Exception {
  TableName tableName = TableName.valueOf("testOrphanRegionsOnFS");
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
  Configuration conf = util.getConfiguration();

  hbckChore.choreForTesting();
  assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());

  HRegion.createRegionDir(conf, regionInfo, CommonFSUtils.getRootDir(conf));
  hbckChore.choreForTesting();
  assertEquals(1, hbckChore.getOrphanRegionsOnFS().size());
  assertTrue(hbckChore.getOrphanRegionsOnFS().containsKey(regionInfo.getEncodedName()));

  FSUtils.deleteRegionDir(conf, regionInfo);
  hbckChore.choreForTesting();
  assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());
}
 
Example #9
Source File: TestCreateTableProcedure.java    From hbase with Apache License 2.0
@Override
protected Flow executeFromState(MasterProcedureEnv env,
  MasterProcedureProtos.CreateTableState state) throws InterruptedException {

  if (!failOnce &&
    state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT) {
    try {
      // To emulate an HDFS failure, create only the first region directory
      RegionInfo regionInfo = getFirstRegionInfo();
      Configuration conf = env.getMasterConfiguration();
      MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      Path tempdir = mfs.getTempDir();
      Path tableDir = CommonFSUtils.getTableDir(tempdir, regionInfo.getTable());
      Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(regionDir);

      failOnce = true;
      return Flow.HAS_MORE_STATE;
    } catch (IOException e) {
      fail("failed to create a region directory: " + e);
    }
  }

  return super.executeFromState(env, state);
}
 
Example #10
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1});
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
                     new LocalRegionServerServices(conf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example #11
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
 
Example #12
Source File: StoreFileInfo.java    From hbase with Apache License 2.0
/**
 * Helper function to compute the HDFS blocks distribution of a given reference file. For a
 * reference file we don't compute the exact value; we use an estimate instead, given it might be
 * good enough. We assume the bottom part takes the first half of the reference file and the top
 * part takes the second half. This is just an estimate, given that the midkey of the region !=
 * the midkey of the HFile, and the number and size of keys vary. If this estimate isn't good
 * enough, we can improve it later.
 * @param fs The FileSystem
 * @param reference The reference
 * @param status The reference FileStatus
 * @return HDFS blocks distribution
 */
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs,
    final Reference reference, final FileStatus status) throws IOException {
  if (status == null) {
    return null;
  }

  long start = 0;
  long length = 0;

  if (Reference.isTopFileRegion(reference.getFileRegion())) {
    start = status.getLen() / 2;
    length = status.getLen() - status.getLen() / 2;
  } else {
    start = 0;
    length = status.getLen() / 2;
  }
  return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
}
 
Example #13
Source File: SplitRegionScanner.java    From spliceengine with GNU Affero General Public License v3.0
void createAndRegisterClientSideRegionScanner(Table table, Scan newScan, Partition partition) throws Exception {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "createAndRegisterClientSideRegionScanner with table=%s, scan=%s, tableConfiguration=%s", table, newScan, table.getConfiguration());
    Configuration conf = table.getConfiguration();
    if (System.getProperty("hbase.rootdir") != null) {
        conf.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
        jobConfig.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
    }

    SkeletonClientSideRegionScanner skeletonClientSideRegionScanner =
            new HBaseClientSideRegionScanner(table,
                    jobConfig,
                    FSUtils.getCurrentFileSystem(conf),
                    FSUtils.getRootDir(conf),
                    ((HPartitionDescriptor)partition.getDescriptor()).getDescriptor(),
                    ((RangedClientPartition) partition).getRegionInfo(),
                    newScan, partition.owningServer().getHostAndPort());
    this.region = skeletonClientSideRegionScanner.getRegion();
    registerRegionScanner(skeletonClientSideRegionScanner);
}
 
Example #14
Source File: HFileCorruptionChecker.java    From hbase with Apache License 2.0
/**
 * Check all files in a column family dir.
 *
 * @param cfDir
 *          column family directory
 * @throws IOException
 */
protected void checkColFamDir(Path cfDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(cfDir); // use same filter as scanner.
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }

  List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (hfs.isEmpty() && !fs.exists(cfDir)) {
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }
  for (FileStatus hfFs : hfs) {
    Path hf = hfFs.getPath();
    checkHFile(hf);
  }
}
 
Example #15
Source File: HFileCorruptionChecker.java    From hbase-operator-tools with Apache License 2.0
/**
 * Check all files in a column family dir.
 */
protected void checkColFamDir(Path cfDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(cfDir); // use same filter as scanner.
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }

  List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (hfs.isEmpty() && !fs.exists(cfDir)) {
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }
  for (FileStatus hfFs : hfs) {
    Path hf = hfFs.getPath();
    checkHFile(hf);
  }
}
 
Example #16
Source File: CachedClusterId.java    From hbase with Apache License 2.0
/**
 * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress,
 * synchronously fetches the cluster ID and sets it. If an attempt is already in progress,
 * returns right away and the caller is expected to wait for the fetch to finish.
 * @return true if the attempt is done, false if another thread is already fetching it.
 */
private boolean attemptFetch() {
  if (fetchInProgress.compareAndSet(false, true)) {
    // A fetch is not in progress, so try fetching the cluster ID synchronously and then notify
    // the waiting threads.
    try {
      cacheMisses.incrementAndGet();
      setClusterId(FSUtils.getClusterId(fs, rootDir));
    } catch (IOException e) {
      LOG.warn("Error fetching cluster ID", e);
    } finally {
      Preconditions.checkState(fetchInProgress.compareAndSet(true, false));
      synchronized (fetchInProgress) {
        fetchInProgress.notifyAll();
      }
    }
    return true;
  }
  return false;
}
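
The caller-side half of this protocol is not shown above; the following is a hypothetical sketch of it (getClusterIdBlocking and the getClusterId accessor are illustrative, not part of the class as quoted). If attemptFetch() reports that another thread is fetching, the caller waits on the fetchInProgress monitor, which the fetching thread notifies after clearing the flag.

private String getClusterIdBlocking() throws InterruptedException {
  // Retries until a fetch succeeds; a production version would bound this loop.
  while (getClusterId() == null) {
    if (!attemptFetch()) {
      synchronized (fetchInProgress) {
        // Re-check under the monitor: the fetcher clears the flag before notifying,
        // so a false value means the fetch has already completed.
        while (fetchInProgress.get()) {
          fetchInProgress.wait();
        }
      }
    }
  }
  return getClusterId();
}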
 
Example #17
Source File: HFileCorruptionChecker.java    From hbase with Apache License 2.0
/**
 * Check all files in a mob column family dir.
 *
 * @param cfDir
 *          mob column family directory
 * @throws IOException
 */
protected void checkMobColFamDir(Path cfDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(cfDir); // use same filter as scanner.
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Mob colfam Directory " + cfDir +
        " does not exist.  Likely the table is deleted. Skipping.");
    missedMobFiles.add(cfDir);
    return;
  }

  List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (hfs.isEmpty() && !fs.exists(cfDir)) {
    LOG.warn("Mob colfam Directory " + cfDir +
        " does not exist.  Likely the table is deleted. Skipping.");
    missedMobFiles.add(cfDir);
    return;
  }
  for (FileStatus hfFs : hfs) {
    Path hf = hfFs.getPath();
    checkMobFile(hf);
  }
}
 
Example #18
Source File: MetricsRegionServerWrapperImpl.java    From hbase with Apache License 2.0
public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
  this.regionServer = regionServer;
  initBlockCache();
  initMobFileCache();

  this.period = regionServer.getConfiguration().getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
    HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);

  this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
  this.runnable = new RegionServerMetricsWrapperRunnable();
  this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
    TimeUnit.MILLISECONDS);
  this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  this.allocator = regionServer.getRpcServer().getByteBuffAllocator();

  try {
    this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
  } catch (IOException e) {
    LOG.warn("Failed to get hedged metrics", e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
  }
}
 
Example #19
Source File: CompactionTool.java    From hbase with Apache License 2.0
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = CommonFSUtils.listStatus(fs, path);
  if (files == null) {
    return new String[] {};
  }

  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus: files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
      FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }

  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
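
Under the hood this is one FSUtils call per store file; a hedged sketch for a single file follows (the path is illustrative, and an fs handle is assumed in scope):

FileStatus fileStatus = fs.getFileStatus(
    new Path("/hbase/data/default/demo/abcdef0123456789abcdef0123456789/cf/storefile1"));
HDFSBlocksDistribution distribution =
    FSUtils.computeHDFSBlocksDistribution(fs, fileStatus, 0, fileStatus.getLen());
List<String> topHosts = distribution.getTopHosts();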
 
Example #20
Source File: HRegionFileSystem.java    From hbase with Apache License 2.0
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system it is moved from the source location to the destination location; otherwise
 * it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  srcPath = srcFs.resolvePath(srcPath);
  FileSystem realSrcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
}
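
A minimal sketch of the same-filesystem test that drives the move-versus-copy decision above (the cluster URIs are illustrative):

Configuration conf = HBaseConfiguration.create();
FileSystem srcFs = new Path("hdfs://staging-cluster/bulk/hfile1").getFileSystem(conf);
FileSystem dstFs = new Path("hdfs://hbase-cluster/hbase").getFileSystem(conf);
if (FSUtils.isSameHdfs(conf, srcFs, dstFs)) {
  // Same HDFS instance: the bulk-loaded file can be renamed into place
} else {
  // Different file systems: the file must be copied before the commit
}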
 
Example #21
Source File: WALSplitUtil.java    From hbase with Apache License 2.0
/**
 * This method will check 3 places for finding the max sequence id file. One is the expected
 * place, another is the old place under the region directory, and the last one is the wrong one
 * we introduced in HBASE-20734. See HBASE-22617 for more details.
 * <p/>
 * Notice that, you should always call this method instead of
 * {@link #getMaxRegionSequenceId(FileSystem, Path)} until 4.0.0 release.
 * @deprecated Only for compatibility, will be removed in 4.0.0.
 */
@Deprecated
public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region,
  IOExceptionSupplier<FileSystem> rootFsSupplier, IOExceptionSupplier<FileSystem> walFsSupplier)
  throws IOException {
  FileSystem rootFs = rootFsSupplier.get();
  FileSystem walFs = walFsSupplier.get();
  Path regionWALDir =
    CommonFSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName());
  // This is the old place where we store max sequence id file
  Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), region);
  // This is for HBASE-20734, where we use a wrong directory, see HBASE-22617 for more details.
  Path wrongRegionWALDir =
    CommonFSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName());
  long maxSeqId = getMaxRegionSequenceId(walFs, regionWALDir);
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(rootFs, regionDir));
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(walFs, wrongRegionWALDir));
  return maxSeqId;
}
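
A hedged invocation sketch: the two suppliers defer FileSystem construction until the method actually needs them (conf and regionInfo are assumed in scope, and CommonFSUtils.getWALRootDir is assumed to be available in your release):

long maxSeqId = WALSplitUtil.getMaxRegionSequenceId(conf, regionInfo,
    () -> CommonFSUtils.getRootDir(conf).getFileSystem(conf),
    () -> CommonFSUtils.getWALRootDir(conf).getFileSystem(conf));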
 
Example #22
Source File: HRegionFileSystem.java    From hbase with Apache License 2.0
/**
 * Clean up any split detritus that may have been left around from previous
 * split attempts.
 * Call this method on initial region deploy.
 * @throws IOException
 */
void cleanupAnySplitDetritus() throws IOException {
  Path splitdir = this.getSplitsDir();
  if (!fs.exists(splitdir)) return;
  // Look at the splitdir.  It could have the encoded names of the daughter
  // regions we tried to make.  See if the daughter regions actually got made
  // out under the tabledir.  If here under splitdir still, then the split did
  // not complete.  Try and do cleanup.  This code WILL NOT catch the case
  // where we successfully created daughter a but regionserver crashed during
  // the creation of region b.  In this case, there'll be an orphan daughter
  // dir in the filesystem.  TODO: Fix.
  FileStatus[] daughters = CommonFSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
  if (daughters != null) {
    for (FileStatus daughter: daughters) {
      Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
      if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
        throw new IOException("Failed delete of " + daughterDir);
      }
    }
  }
  cleanupSplitsDir();
  LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
 
Example #23
Source File: HFile.java    From hbase with Apache License 2.0
/**
 * Returns all HFiles belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
public static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
    throws IOException {
  List<Path> regionHFiles = new ArrayList<>();
  PathFilter dirFilter = new FSUtils.DirFilter(fs);
  FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
  for(FileStatus dir : familyDirs) {
    FileStatus[] files = fs.listStatus(dir.getPath());
    for (FileStatus file : files) {
      if (!file.isDirectory() &&
          (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) &&
          (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) {
        regionHFiles.add(file.getPath());
      }
    }
  }
  return regionHFiles;
}
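
A hypothetical invocation (the region directory path is illustrative, and a conf is assumed in scope):

FileSystem fs = FileSystem.get(conf);
Path regionDir = new Path("/hbase/data/default/demo/abcdef0123456789abcdef0123456789");
for (Path hfile : HFile.getStoreFiles(fs, regionDir)) {
  System.out.println("store file: " + hfile);
}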
 
Example #24
Source File: HRegionFileSystem.java    From hbase with Apache License 2.0
/**
 * Create a view to the on-disk region
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} that contains the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link RegionInfo} for region
 */
HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
    final RegionInfo regionInfo) {
  this.fs = fs;
  this.conf = conf;
  this.tableDir = Objects.requireNonNull(tableDir, "tableDir is null");
  this.regionInfo = Objects.requireNonNull(regionInfo, "regionInfo is null");
  this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
  this.regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
  this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
    DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
  this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
    DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
}
 
Example #25
Source File: MasterWalManager.java    From hbase with Apache License 2.0
/**
 * Checks to see if the file system is still accessible. If not, aborts the master services and
 * clears the fsOk flag.
 * @return false if the file system is not available
 */
private boolean checkFileSystem() {
  if (this.fsOk) {
    try {
      FSUtils.checkFileSystemAvailable(this.fs);
      FSUtils.checkDfsSafeMode(this.conf);
    } catch (IOException e) {
      services.abort("Shutting down HBase cluster: file system not available", e);
      this.fsOk = false;
    }
  }
  return this.fsOk;
}
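
The two FSUtils checks can also be used standalone; a hedged sketch (abort handling omitted, fs and conf assumed in scope):

try {
  FSUtils.checkFileSystemAvailable(fs); // throws IOException if HDFS is unreachable
  FSUtils.checkDfsSafeMode(conf);       // throws IOException if the NameNode is in safe mode
} catch (IOException e) {
  LOG.error("File system unavailable or in safe mode", e);
}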
 
Example #26
Source File: TestHRegionFileSystem.java    From hbase with Apache License 2.0
private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf)
    throws IOException {
  FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
  Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
  List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
  assertEquals(1, regionDirs.size());
  List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0));
  assertEquals(2, familyDirs.size());
  RegionInfo hri =
    conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion();
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri);
  return regionFs;
}
 
Example #27
Source File: HFileCorruptionChecker.java    From hbase with Apache License 2.0
/**
 * Check all column families in a region dir.
 *
 * @param regionDir
 *          region directory
 * @throws IOException
 */
protected void checkRegionDir(Path regionDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(regionDir);
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (cfs.isEmpty() && !fs.exists(regionDir)) {
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  for (FileStatus cfFs : cfs) {
    Path cfDir = cfFs.getPath();
    checkColFamDir(cfDir);
  }
}