Java Code Examples for org.apache.hadoop.fs.FileSystem#access()

The following examples show how to use org.apache.hadoop.fs.FileSystem#access().
Example 1
Project: hbase   File: HBaseFsck.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the current user has write permission on every top-level entry
 * under the HBase root directory before hbck proceeds.
 *
 * <p>Each inaccessible path is reported through {@code errors} and the
 * triggering exception is rethrown, aborting the check on the first failure.
 *
 * @throws IOException if listing the root directory fails or the permission
 *         probe is denied ({@link AccessControlException} is an IOException)
 */
private void preCheckPermission() throws IOException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = CommonFSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      // Probe WRITE access without reading the file; throws on denial.
      fs.access(file.getPath(), FsAction.WRITE);
    } catch (AccessControlException ace) {
      // Fixed: log message previously named AccessDeniedException although the
      // caught type is AccessControlException — keep the two in agreement.
      LOG.warn("Got AccessControlException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
 
Example 2
Project: hadoop   File: TestAclWithSnapshot.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Asserts that permission is granted to the given fs/user for the given
 * directory.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path directory to check
 * @throws Exception if there is an unexpected error
 */
private static void assertDirPermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  // The directory must be both listable and explicitly readable; either
  // operation throwing AccessControlException means permission was denied.
  try {
    fs.listStatus(pathToCheck);
    fs.access(pathToCheck, FsAction.READ);
  } catch (AccessControlException denied) {
    String message = "expected permission granted for user " + user + ", path = " +
      pathToCheck;
    fail(message);
  }
}
 
Example 3
Project: big-c   File: TestAclWithSnapshot.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Asserts that permission is granted to the given fs/user for the given
 * directory.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path directory to check
 * @throws Exception if there is an unexpected error
 */
private static void assertDirPermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    // Listing exercises execute/read on the directory itself...
    fs.listStatus(pathToCheck);
    // ...and access() asks the NameNode directly whether READ is granted.
    fs.access(pathToCheck, FsAction.READ);
  } catch (AccessControlException ace) {
    // Reaching here means the expected grant is missing — fail the test.
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Example 4
Project: hbase   File: HStore.java    License: Apache License 2.0 5 votes vote down vote up
// Validates a recovered HFile and, if it belongs to this region, commits it
// into the store and registers it with the store file manager.
// Statement order matters: validation happens before the file is moved, and
// the new store file is inserted under the store's write lock.
public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
  LOG.info("Validating recovered hfile at {} for inclusion in store {}", path, this);
  FileSystem srcFs = path.getFileSystem(conf);
  // Fail fast (AccessControlException, an IOException) if this user cannot
  // read and write the source file.
  srcFs.access(path, FsAction.READ_WRITE);
  // try-with-resources: the reader is closed once key-range validation is done.
  try (HFile.Reader reader =
      HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) {
    Optional<byte[]> firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
    Optional<Cell> lk = reader.getLastKey();
    Preconditions.checkState(lk.isPresent(), "Last key can not be null");
    byte[] lastKey = CellUtil.cloneRow(lk.get());
    // Reject the file if its row-key span falls outside this region's range.
    if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
      throw new WrongRegionException("Recovered hfile " + path.toString() +
          " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
    }
  }

  // Move the validated file into the store's directory and open a reader on it.
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
  HStoreFile sf = createStoreFileAndReader(dstPath);
  StoreFileReader r = sf.getReader();
  // Account for the new file's size in the store-level counters.
  this.storeSize.addAndGet(r.length());
  this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

  // Register the new file with the store file manager under the write lock so
  // concurrent readers see a consistent file set.
  this.lock.writeLock().lock();
  try {
    this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
  } finally {
    this.lock.writeLock().unlock();
  }

  LOG.info("Loaded recovered hfile to {}, entries={}, sequenceid={}, filesize={}", sf,
    r.getEntries(), r.getSequenceID(), TraditionalBinaryPrefix.long2String(r.length(), "B", 1));
  return sf;
}
 
Example 5
Project: incubator-retired-blur   File: ClusterDriver.java    License: Apache License 2.0 5 votes vote down vote up
// Returns whether the current user may READ the given path; FileSystem.access
// signals denial (or any other failure) by throwing, so the catch maps any
// IOException to false.
private static boolean hasReadAccess(FileSystem fileSystem, Path p) {
  boolean readable;
  try {
    fileSystem.access(p, FsAction.READ);
    readable = true;
  } catch (IOException probeFailure) {
    readable = false;
  }
  return readable;
}
 
Example 6
Project: hbase   File: HStore.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * This throws a WrongRegionException if the HFile does not fit in this region, or an
 * InvalidHFileException if the HFile is not valid.
 */
/**
 * Validates an HFile at {@code srcPath} for bulk load into this store.
 *
 * <p>Checks access permissions, that the file's key range fits inside this
 * region, warns when the file is large enough to cause oversplitting, and —
 * when {@code verifyBulkLoads} is set — scans every cell to verify row ordering
 * and family consistency.
 *
 * @param srcPath path of the candidate HFile
 * @throws WrongRegionException if the HFile does not fit in this region
 * @throws InvalidHFileException if the HFile is not valid
 * @throws IOException on access denial or read failure
 */
public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
  LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this);
  FileSystem srcFs = srcPath.getFileSystem(conf);
  // Fail fast if this user lacks read/write permission on the source file.
  srcFs.access(srcPath, FsAction.READ_WRITE);
  // try-with-resources replaces the manual null-checked finally/close: the
  // reader can no longer leak, and this matches the style already used by
  // tryCommitRecoveredHFile in this class.
  try (HFile.Reader reader =
      HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf)) {
    Optional<byte[]> firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
    Optional<Cell> lk = reader.getLastKey();
    Preconditions.checkState(lk.isPresent(), "Last key can not be null");
    byte[] lastKey = CellUtil.cloneRow(lk.get());

    if (LOG.isDebugEnabled()) {
      LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) +
          " last=" + Bytes.toStringBinary(lastKey));
      LOG.debug("Region bounds: first=" +
          Bytes.toStringBinary(getRegionInfo().getStartKey()) +
          " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
    }

    // The whole key range of the file must lie within this region.
    if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
      throw new WrongRegionException(
          "Bulk load file " + srcPath.toString() + " does not fit inside region "
          + this.getRegionInfo().getRegionNameAsString());
    }

    // A file larger than the region split size is legal but likely to trigger
    // immediate splitting; warn rather than reject.
    if (reader.length() > conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE)) {
      LOG.warn("Trying to bulk load hfile " + srcPath + " with size: " +
          reader.length() + " bytes can be problematic as it may lead to oversplitting.");
    }

    if (verifyBulkLoads) {
      long verificationStartTime = EnvironmentEdgeManager.currentTime();
      LOG.info("Full verification started for bulk load hfile: {}", srcPath);
      Cell prevCell = null;
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        Cell cell = scanner.getCell();
        if (prevCell != null) {
          // Rows must be in non-decreasing order.
          if (comparator.compareRows(prevCell, cell) > 0) {
            throw new InvalidHFileException("Previous row is greater than"
                + " current row: path=" + srcPath + " previous="
                + CellUtil.getCellKeyAsString(prevCell) + " current="
                + CellUtil.getCellKeyAsString(cell));
          }
          // Every cell in the file must belong to the same column family.
          if (CellComparator.getInstance().compareFamilies(prevCell, cell) != 0) {
            throw new InvalidHFileException("Previous key had different"
                + " family compared to current key: path=" + srcPath
                + " previous="
                + Bytes.toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(),
                    prevCell.getFamilyLength())
                + " current="
                + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
                    cell.getFamilyLength()));
          }
        }
        prevCell = cell;
      } while (scanner.next());
      LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString() +
        " took " + (EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
    }
  }
}