org.apache.hadoop.hbase.wal.WALSplitter Java Examples

The following examples show how to use org.apache.hadoop.hbase.wal.WALSplitter. They are taken from the Apache HBase and Apache Phoenix projects; the source file and license for each are noted above the example.
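Most of the runWALSplit helpers shown below boil down to the same static call, WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, walFactory). As a minimal, hedged sketch of that call (the directory layout, server name, and factory id here are placeholder assumptions, not taken from the projects above):

// Split the WALs left behind by one (hypothetical) dead server into per-region
// recovered.edits files; processed WALs are archived under oldLogDir.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
Path rootDir = CommonFSUtils.getRootDir(conf);
Path walDir = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
Path serverLogDir = new Path(walDir, "regionserver-1,16020,1588230740");  // placeholder server dir
Path oldLogDir = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME);
WALFactory walFactory = new WALFactory(conf, "wal-splitter-example");
List<Path> recoveredEdits = WALSplitter.split(rootDir, serverLogDir, oldLogDir, fs, conf, walFactory);

The returned list holds the recovered.edits files that were written, which is what the assertions in the examples below check.
 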
Example #1
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java    From phoenix with Apache License 2.0
private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IOException {
  FileSystem fs = FileSystem.get(c);
  
  List<Path> splits = WALSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
      this.oldLogDir, fs, c, walFactory);
  // Split should generate only 1 file since there's only 1 region
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
 
Example #2
Source File: TestWALObserver.java    From hbase with Apache License 2.0
private Path runWALSplit(final Configuration c) throws IOException {
  List<Path> splits = WALSplitter.split(
    hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals);
  // Split should generate only 1 file since there's only 1 region
  assertEquals(1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
 
Example #3
Source File: AbstractTestWALReplay.java    From hbase with Apache License 2.0
/**
 * testcase for https://issues.apache.org/jira/browse/HBASE-14949.
 */
private void testNameConflictWhenSplit(boolean largeFirst) throws IOException,
    StreamLacksCapabilityException {
  final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);

  final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getFamiliesKeys()) {
    scopes.put(fam, 0);
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtility.closeRegionAndWAL(region);
  final byte[] family = htd.getColumnFamilies()[0].getName();
  final byte[] rowName = tableName.getName();
  FSWALEntry entry1 = createFSWALEntry(htd, hri, 1L, rowName, family, ee, mvcc, 1, scopes);
  FSWALEntry entry2 = createFSWALEntry(htd, hri, 2L, rowName, family, ee, mvcc, 2, scopes);

  Path largeFile = new Path(logDir, "wal-1");
  Path smallFile = new Path(logDir, "wal-2");
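  // The larger WAL holds both edits while the smaller holds only the second, so both split runs
  // write recovered edits for the same region; HBASE-14949 (linked in the javadoc above) covers
  // the file-name conflict this test exercises.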
  writerWALFile(largeFile, Arrays.asList(entry1, entry2));
  writerWALFile(smallFile, Arrays.asList(entry2));
  FileStatus first, second;
  if (largeFirst) {
    first = fs.getFileStatus(largeFile);
    second = fs.getFileStatus(smallFile);
  } else {
    first = fs.getFileStatus(smallFile);
    second = fs.getFileStatus(largeFile);
  }
  WALSplitter.splitLogFile(hbaseRootDir, first, fs, conf, null, null, null, wals, null);
  WALSplitter.splitLogFile(hbaseRootDir, second, fs, conf, null, null, null, wals, null);
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  region = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal);
  assertTrue(region.getOpenSeqNum() > mvcc.getWritePoint());
  assertEquals(2, region.get(new Get(rowName)).size());
}
 
Example #4
Source File: AbstractTestWALReplay.java    From hbase with Apache License 2.0
/**
 * Run the split and verify that only a single split file was made.
 * @return The single split file made
 */
private Path runWALSplit(final Configuration c) throws IOException {
  List<Path> splits = WALSplitter.split(
    hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals);
  // Split should generate only 1 file since there's only 1 region
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
 
Example #5
Source File: MasterWalManager.java    From hbase with Apache License 2.0
/**
 * Inspect the log directory to find dead servers which need recovery work.
 * @return A set of ServerNames which aren't running but still have WAL files left in the filesystem
 * @deprecated With proc-v2, crashed servers are recorded in the procedure store, so there is no
 *             need to scan the WAL directory to find directories that still need splitting. Kept
 *             only because {@code RecoverMetaProcedure} (which is also deprecated) uses it.
 */
@Deprecated
public Set<ServerName> getFailedServersFromLogFolders() throws IOException {
  boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
      WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);

  Set<ServerName> serverNames = new HashSet<>();
  Path logsDirPath = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME);

  do {
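    // Scan the WAL directory for dead servers' logs, retrying on IOException unless
    // hbase.hlog.split.skip.errors is set.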
    if (services.isStopped()) {
      LOG.warn("Master stopped while trying to get failed servers.");
      break;
    }
    try {
      if (!this.fs.exists(logsDirPath)) return serverNames;
      FileStatus[] logFolders = CommonFSUtils.listStatus(this.fs, logsDirPath, null);
      // Get online servers after getting log folders to avoid log folder deletion of newly
      // checked-in region servers. See HBASE-5916.
      Set<ServerName> onlineServers = services.getServerManager().getOnlineServers().keySet();

      if (logFolders == null || logFolders.length == 0) {
        LOG.debug("No log files to split, proceeding...");
        return serverNames;
      }
      for (FileStatus status : logFolders) {
        FileStatus[] curLogFiles = CommonFSUtils.listStatus(this.fs, status.getPath(), null);
        if (curLogFiles == null || curLogFiles.length == 0) {
          // Empty log folder. No recovery needed
          continue;
        }
        final ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(
            status.getPath());
        if (null == serverName) {
          LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " +
              "region server name; leaving in place. If you see later errors about missing " +
              "write ahead logs they may be saved in this location.");
        } else if (!onlineServers.contains(serverName)) {
          LOG.info("Log folder " + status.getPath() + " doesn't belong "
              + "to a known region server, splitting");
          serverNames.add(serverName);
        } else {
          LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
        }
      }
      retrySplitting = false;
    } catch (IOException ioe) {
      LOG.warn("Failed getting failed servers to be recovered.", ioe);
      if (!checkFileSystem()) {
        LOG.warn("Bad Filesystem, exiting");
        Runtime.getRuntime().halt(1);
      }
      try {
        if (retrySplitting) {
          Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
        }
      } catch (InterruptedException e) {
        LOG.warn("Interrupted, aborting since cannot return w/o splitting");
        Thread.currentThread().interrupt();
        retrySplitting = false;
        Runtime.getRuntime().halt(1);
      }
    }
  } while (retrySplitting);

  return serverNames;
}
 
Example #6
Source File: TestWALReplayBoundedLogWriterCreation.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TestWALReplay.setUpBeforeClass();
  TEST_UTIL.getConfiguration().setBoolean(WALSplitter.SPLIT_WRITER_CREATION_BOUNDED, true);
}
 
Example #7
Source File: TestLogRollAbort.java    From hbase with Apache License 2.0
/**
 * Tests the case where a RegionServer enters a GC pause and comes back online after the master
 * has declared it dead and started splitting its logs. Log rolling after the split has started
 * should fail. See HBASE-2312.
 */
@Test
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = ServerName.valueOf("testLogRollAfterSplitStart",
      16010, System.currentTimeMillis()).toString();
  Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, logName);

  try {
    // put some entries in a WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL log = wals.getWAL(regionInfo);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      scopes.put(Bytes.toBytes("column"), 0);
      log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), mvcc, scopes), kvs);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog)log).getOldPath(), null, null);

    // Code taken from MasterFileSystem.getLogDirs(), which is called from
    // MasterFileSystem.splitLog() and handles RS shutdowns (as observed by the splitting process):
    // rename the directory so a rogue RS doesn't create more WALs.
    Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}