Java Code Examples for org.apache.hadoop.hbase.util.FSUtils#listStatus()

The following examples show how to use org.apache.hadoop.hbase.util.FSUtils#listStatus(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: AbstractHoplog.java    From gemfirexd-oss with Apache License 2.0 6 votes vote down vote up
private synchronized void initHoplogSizeTimeInfo() {
  // Lazily populate the cached size and modification time; if both are
  // already set there is nothing to do.
  if (hoplogSize != null && hoplogModificationTime != null) {
    return;
  }

  try {
    // Listing the hoplog's own path is expected to yield exactly one entry.
    FileStatus[] statuses = FSUtils.listStatus(fsProvider.getFS(), path, null);
    if (statuses != null && statuses.length == 1) {
      FileStatus status = statuses[0];
      this.hoplogModificationTime = status.getModificationTime();
      this.hoplogSize = status.getLen();
    }
    // TODO else condition may happen if user deletes hoplog from the file system.
  } catch (IOException e) {
    logger.error(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path, e);
    throw new HDFSIOException(
        LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
  }
}
 
Example 2
Source File: AbstractHoplog.java    From gemfirexd-oss with Apache License 2.0 6 votes vote down vote up
private synchronized void initHoplogSizeTimeInfo() {
  // Both values cached already? Then skip the file-system round trip.
  if (hoplogSize != null && hoplogModificationTime != null) {
    return;
  }

  try {
    // A single FileStatus is expected when listing the hoplog path itself.
    FileStatus[] listing = FSUtils.listStatus(fsProvider.getFS(), path, null);
    if (listing != null && listing.length == 1) {
      this.hoplogModificationTime = listing[0].getModificationTime();
      this.hoplogSize = listing[0].getLen();
    }
    // TODO else condition may happen if user deletes hoplog from the file system.
  } catch (IOException e) {
    logger.error(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path, e);
    throw new HDFSIOException(
        LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
  }
}
 
Example 3
Source File: HFileSortedOplog.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
@Override
public long getModificationTimeStamp() throws IOException {
  // Look up this hoplog's own status; exactly one entry is expected.
  FileStatus[] statuses = FSUtils.listStatus(fs, path, null);
  if (statuses == null || statuses.length != 1) {
    // Missing or ambiguous listing: report an unknown (zero) timestamp.
    return 0;
  }
  return statuses[0].getModificationTime();
}
 
Example 4
Source File: HDFSUnsortedHoplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
private FileStatus[] getExpiredHoplogs() throws IOException {
  // Expired hoplogs are identified purely by the expired-marker extension.
  return FSUtils.listStatus(fileSystem, bucketPath, new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // Keep only files whose name carries the expire extension.
      return file.getName().endsWith(EXPIRED_HOPLOG_EXTENSION);
    }
  });
}
 
Example 5
Source File: HdfsSortedOplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
protected FileStatus[] getExpiryMarkers() throws IOException {
  FileSystem fs = store.getFileSystem();
  // When no hoplogs are tracked, the bucket directory may not exist at all;
  // short-circuit keeps the fs.exists() call conditional, as before.
  boolean noHoplogsKnown = hoplogReadersController.hoplogs == null
      || hoplogReadersController.hoplogs.size() == 0;
  if (noHoplogsKnown && !fs.exists(bucketPath)) {
    logger.fine("This bucket is unused, skipping expired hoplog check");
    return null;
  }

  return FSUtils.listStatus(fs, bucketPath, new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // An expiry marker must carry the expire extension ...
      String name = file.getName();
      if (!name.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
        return false;
      }
      // ... and, once that extension is stripped, match the sorted-hoplog
      // file-name pattern.
      Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(truncateExpiryExtension(name));
      return matcher.find();
    }
  });
}
 
Example 6
Source File: HFileSortedOplog.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
@Override
public long getModificationTimeStamp() throws IOException {
  // Fetch this file's status; anything other than a single entry means the
  // timestamp cannot be determined.
  FileStatus[] listing = FSUtils.listStatus(fs, path, null);
  boolean exactlyOne = listing != null && listing.length == 1;
  return exactlyOne ? listing[0].getModificationTime() : 0;
}
 
Example 7
Source File: HDFSUnsortedHoplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
private FileStatus[] getExpiredHoplogs() throws IOException {
  // Scan the bucket directory for files flagged with the expire extension.
  PathFilter expiredFilter = new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // Expired hoplogs end with the expired-marker extension.
      return file.getName().endsWith(EXPIRED_HOPLOG_EXTENSION);
    }
  };
  return FSUtils.listStatus(fileSystem, bucketPath, expiredFilter);
}
 
Example 8
Source File: HdfsSortedOplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 5 votes vote down vote up
protected FileStatus[] getExpiryMarkers() throws IOException {
  FileSystem fs = store.getFileSystem();
  // With no hoplogs tracked, the bucket directory itself may be absent;
  // the exists() check only runs in that case (short-circuit preserves the
  // original call order).
  boolean bucketEmpty = hoplogReadersController.hoplogs == null
      || hoplogReadersController.hoplogs.size() == 0;
  if (bucketEmpty && !fs.exists(bucketPath)) {
    logger.fine("This bucket is unused, skipping expired hoplog check");
    return null;
  }

  PathFilter markerFilter = new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // Must end with the expire extension, and the remainder of the name
      // must match the sorted-hoplog pattern.
      String name = file.getName();
      if (!name.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
        return false;
      }
      return SORTED_HOPLOG_PATTERN.matcher(truncateExpiryExtension(name)).find();
    }
  };
  return FSUtils.listStatus(fs, bucketPath, markerFilter);
}
 
Example 9
Source File: HDFSUnsortedHoplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 4 votes vote down vote up
/**
 * Fixes the size of hoplogs that were not closed properly last time. 
 * Such hoplogs are *.tmphop files. Identify them and open them and close 
 * them, this fixes the size. After doing this rename them to *.hop. 
 * 
 * @param fs the file system to use for listing and fixing; deliberately a
 *           separate instance from the store's (see the HDFS-3848 note below)
 * @throws IOException if listing the bucket directory fails
 * @throws ForceReattemptException 
 */
void identifyAndFixTmpHoplogs(FileSystem fs) throws IOException, ForceReattemptException {
  logger.fine("Fixing temporary hoplogs");
  
  // A different filesystem is passed to this function for the following reason: 
  // For HDFS, if a file wasn't closed properly last time, 
  // while calling FileSystem.append for this file, FSNamesystem.startFileInternal->
  // FSNamesystem.recoverLeaseInternal function gets called. 
  // This function throws AlreadyBeingCreatedException if there is an open handle, to any other file, 
  // created using the same FileSystem object. This is a bug and is being tracked at: 
  // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
  // 
  // The fix for this bug is not yet part of Pivotal HD. So to overcome the bug, 
  // we create a new file system for the timer task so that it does not encounter the bug. 
  
  // Collect all entries in the bucket directory whose names match the
  // temporary-hoplog pattern.
  FileStatus tmpHoplogs[] = FSUtils.listStatus(fs, fs.makeQualified(bucketPath), new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // All valid hoplog files must match the regex
      Matcher matcher = patternForTmpHoplog.matcher(file.getName());
      return matcher.matches();
    }
  });
  
  // listStatus may return null (e.g. path missing) or an empty array.
  if (tmpHoplogs == null || tmpHoplogs.length == 0) {
    logger.fine("No files to fix");
    return;
  }
  // ping secondaries so that in case of split brain, no other vm has taken up 
  // as primary. #50110. 
  pingSecondaries();
  
  logger.fine("Files to fix " + tmpHoplogs.length);

  String currentHoplogName = null;
  // get the current hoplog name. We need to ignore current hoplog while fixing. 
  if (currentHoplog != null) {
    currentHoplogName = currentHoplog.getFileName();
  }
  
  for (int i = 0; i < tmpHoplogs.length; i++) {
    // Skip directories
    if (tmpHoplogs[i].isDirectory()) {
      continue;
    }

    final Path p = tmpHoplogs[i].getPath();
    
    // The hoplog currently being written must not be touched.
    if (tmpHoplogs[i].getPath().getName().equals(currentHoplogName)){
      if (logger.fineEnabled())
        logger.fine("Skipping current file: " + tmpHoplogs[i].getPath().getName());
      continue;
    } 
    
    // Open the temporary hoplog and hand it to makeLegitimate; a failure is
    // logged but does not abort processing of the remaining files.
    SequenceFileHoplog hoplog = new SequenceFileHoplog(fs, p, stats);
    try {
      makeLegitimate(hoplog);
      logger.info (LocalizedStrings.DEBUG, "Hoplog " + p + " was a temporary " +
          "hoplog because the node managing it wasn't shutdown properly last time. Fixed the hoplog name.");
    } catch (IOException e) {
      logger.info (LocalizedStrings.DEBUG, "Hoplog " + p + " is still a temporary " +
          "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
          "change the hoplog name because an exception was thrown while fixing it. " + e);
    }
  }
}
 
Example 10
Source File: HDFSUnsortedHoplogOrganizer.java    From gemfirexd-oss with Apache License 2.0 4 votes vote down vote up
/**
 * Fixes the size of hoplogs that were not closed properly last time. 
 * Such hoplogs are *.tmphop files. Identify them and open them and close 
 * them, this fixes the size. After doing this rename them to *.hop. 
 * 
 * @param fs the file system to use for listing and fixing; deliberately a
 *           separate instance from the store's (see the HDFS-3848 note below)
 * @throws IOException if listing the bucket directory fails
 * @throws ForceReattemptException 
 */
void identifyAndFixTmpHoplogs(FileSystem fs) throws IOException, ForceReattemptException {
  logger.fine("Fixing temporary hoplogs");
  
  // A different filesystem is passed to this function for the following reason: 
  // For HDFS, if a file wasn't closed properly last time, 
  // while calling FileSystem.append for this file, FSNamesystem.startFileInternal->
  // FSNamesystem.recoverLeaseInternal function gets called. 
  // This function throws AlreadyBeingCreatedException if there is an open handle, to any other file, 
  // created using the same FileSystem object. This is a bug and is being tracked at: 
  // https://issues.apache.org/jira/browse/HDFS-3848?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
  // 
  // The fix for this bug is not yet part of Pivotal HD. So to overcome the bug, 
  // we create a new file system for the timer task so that it does not encounter the bug. 
  
  // Find every entry in the bucket directory whose name matches the
  // temporary-hoplog pattern.
  FileStatus tmpHoplogs[] = FSUtils.listStatus(fs, fs.makeQualified(bucketPath), new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // All valid hoplog files must match the regex
      Matcher matcher = patternForTmpHoplog.matcher(file.getName());
      return matcher.matches();
    }
  });
  
  // listStatus may return null (e.g. path missing) or an empty array.
  if (tmpHoplogs == null || tmpHoplogs.length == 0) {
    logger.fine("No files to fix");
    return;
  }
  // ping secondaries so that in case of split brain, no other vm has taken up 
  // as primary. #50110. 
  pingSecondaries();
  
  logger.fine("Files to fix " + tmpHoplogs.length);

  String currentHoplogName = null;
  // get the current hoplog name. We need to ignore current hoplog while fixing. 
  if (currentHoplog != null) {
    currentHoplogName = currentHoplog.getFileName();
  }
  
  for (int i = 0; i < tmpHoplogs.length; i++) {
    // Skip directories
    if (tmpHoplogs[i].isDirectory()) {
      continue;
    }

    final Path p = tmpHoplogs[i].getPath();
    
    // Never touch the hoplog that is currently being written.
    if (tmpHoplogs[i].getPath().getName().equals(currentHoplogName)){
      if (logger.fineEnabled())
        logger.fine("Skipping current file: " + tmpHoplogs[i].getPath().getName());
      continue;
    } 
    
    // Open the temporary hoplog and attempt makeLegitimate; failures are
    // logged and the loop moves on to the next file.
    SequenceFileHoplog hoplog = new SequenceFileHoplog(fs, p, stats);
    try {
      makeLegitimate(hoplog);
      logger.info (LocalizedStrings.DEBUG, "Hoplog " + p + " was a temporary " +
          "hoplog because the node managing it wasn't shutdown properly last time. Fixed the hoplog name.");
    } catch (IOException e) {
      logger.info (LocalizedStrings.DEBUG, "Hoplog " + p + " is still a temporary " +
          "hoplog because the node managing it wasn't shutdown properly last time. Failed to " +
          "change the hoplog name because an exception was thrown while fixing it. " + e);
    }
  }
}