Java Code Examples for org.apache.hadoop.hbase.wal.WALFactory#close()

The following examples show how to use org.apache.hadoop.hbase.wal.WALFactory#close(). All of them come from the Apache HBase project; the source file for each example is noted above its code.
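
All of the examples share the same lifecycle: build a WALFactory from a Configuration and a factory id, obtain a WAL from it, and release the underlying resources with close() in a finally block. The following minimal sketch (not taken from the hbase sources; the method name and factory id string are illustrative) shows that pattern. Passing null to getWAL selects the factory's default WAL, as in Examples 1 and 3 below.

void walFactoryLifecycle(Configuration conf) throws IOException {
  final WALFactory wals = new WALFactory(conf, "example-factory-id");
  try {
    WAL wal = wals.getWAL(null); // null selects the factory's default WAL
    wal.rollWriter();            // use the WAL: append, sync, roll, ...
  } finally {
    wals.close();                // close the provider and its underlying writers
  }
}
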
Example 1
Source File: AbstractTestLogRolling.java    From hbase with Apache License 2.0
/**
 * Tests that log rolling doesn't hang when no data is written.
 */
@Test
public void testLogRollOnNothingWritten() throws Exception {
  final Configuration conf = TEST_UTIL.getConfiguration();
  final WALFactory wals =
    new WALFactory(conf, ServerName.valueOf("test.com", 8080, 1).toString());
  final WAL newLog = wals.getWAL(null);
  try {
    // Now roll the log before we write anything.
    newLog.rollWriter(true);
  } finally {
    wals.close();
  }
}
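
Note that the test passes true to rollWriter: the hbase WAL implementations may skip an unforced roll when nothing has been appended to the current writer, so forcing the roll is what exercises the "nothing written" path. A hedged comparison (not part of the original test):

newLog.rollWriter();     // unforced: may be skipped while the current writer has no entries
newLog.rollWriter(true); // forced: always replaces the current writer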
 
Example 2
Source File: TestHRegionWithInMemoryFlush.java    From hbase with Apache License 2.0
/**
 * A test case for HBASE-21041.
 * @throws Exception on failure
 */
@Override
@Test
public void testFlushAndMemstoreSizeCounting() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, method);
  int count = 0;
  try {
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.addColumn(family, family, row);
      region.put(put);
      // In-memory flush every 1000 puts
      if (count++ % 1000 == 0) {
        ((CompactingMemStore) (region.getStore(family).memstore))
            .flushInMemory();
      }
    }
    region.flush(true);
    // After flush, data size should be zero
    Assert.assertEquals(0, region.getMemStoreDataSize());
    // After flush, a new active mutable segment is created, so the heap size
    // should equal MutableSegment.DEEP_OVERHEAD
    Assert.assertEquals(MutableSegment.DEEP_OVERHEAD, region.getMemStoreHeapSize());
    // After flush, offheap size should be zero
    Assert.assertEquals(0, region.getMemStoreOffHeapSize());

  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
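
The cast to CompactingMemStore above works because TestHRegionWithInMemoryFlush creates its regions with in-memory compaction enabled. Outside this test harness, roughly the same setup can be requested per column family when building the table descriptor; a minimal sketch, with an illustrative table and family name:

// Hypothetical sketch: BASIC in-memory compaction backs the family's memstore with a
// CompactingMemStore, which is what makes explicit in-memory flushes possible.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family"))
    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
    .build())
  .build();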
 
Example 3
Source File: TestLogRollingNoCluster.java    From hbase with Apache License 2.0
/**
 * Spin up a bunch of threads and have them all append to a WAL. Roll the
 * WAL frequently to try to trigger an NPE.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testContendedLogRolling() throws Exception {
  TEST_UTIL.startMiniDFSCluster(3);
  Path dir = TEST_UTIL.getDataTestDirOnTestFS();

  // The implementation needs to know the 'handler' count.
  TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, NUM_THREADS);
  final Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.set(WALFactory.WAL_PROVIDER, "filesystem");
  CommonFSUtils.setRootDir(conf, dir);
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(TEST_UTIL.getConfiguration());
  FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
  TableDescriptor metaTableDescriptor = fsTableDescriptors.get(TableName.META_TABLE_NAME);
  conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName());
  final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName());
  final WAL wal = wals.getWAL(null);

  final int numThreads = NUM_THREADS;
  Appender[] appenders = new Appender[numThreads];
  try {
    for (int i = 0; i < numThreads; i++) {
      // Have each appending thread write NUM_ENTRIES entries
      appenders[i] = new Appender(metaTableDescriptor, wal, i, NUM_ENTRIES);
    }
    for (int i = 0; i < numThreads; i++) {
      appenders[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
      // ensure that all threads are joined before closing the WAL
      appenders[i].join();
    }
  } finally {
    wals.close();
  }
  for (int i = 0; i < numThreads; i++) {
    assertFalse("Error: " + appenders[i].getException(), appenders[i].isException());
  }
  TEST_UTIL.shutdownMiniDFSCluster();
}
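
The Appender helper used above is a Thread subclass defined elsewhere in TestLogRollingNoCluster and is not shown here. The following hypothetical sketch (the class name, constructor, and error handling are illustrative, and it is built only from the WAL calls that also appear in Example 4) gives an idea of what each appending thread does: append a fixed number of edits to the shared WAL, syncing as it goes; in the real test the WAL is also rolled frequently to provoke the contention described in the javadoc.

static class SketchAppender extends Thread {
  private final WAL wal;
  private final RegionInfo regionInfo;
  private final TableName tableName;
  private final int entries;

  SketchAppender(WAL wal, RegionInfo regionInfo, TableName tableName, int entries) {
    this.wal = wal;
    this.regionInfo = regionInfo;
    this.tableName = tableName;
    this.entries = entries;
  }

  @Override
  public void run() {
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    try {
      for (int i = 0; i < entries; i++) {
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        scopes.put(tableName.getName(), 0);
        // Append the edit and push it through to the WAL's underlying writer.
        wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(),
            tableName, System.currentTimeMillis(), mvcc, scopes), edit);
        wal.sync();
      }
    } catch (IOException e) {
      // The real Appender records the exception so the test can assert on it later.
      throw new RuntimeException("append failed", e);
    }
  }
}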
 
Example 4
Source File: TestLogRollAbort.java    From hbase with Apache License 2.0
/**
 * Tests the case where a RegionServer enters a GC pause and comes back online
 * after the master has declared it dead and started to split its WALs.
 * Log rolling after the master's split has started should fail. See HBASE-2312.
 */
@Test
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = ServerName.valueOf("testLogRollAfterSplitStart",
      16010, System.currentTimeMillis()).toString();
  Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, logName);

  try {
    // put some entries in a WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL log = wals.getWAL(regionInfo);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      scopes.put(Bytes.toBytes("column"), 0);
      log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), mvcc, scopes), kvs);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog)log).getOldPath(), null, null);

    // Code taken from MasterFileSystem.getLogDirs(), which is called from
    // MasterFileSystem.splitLog(), the path that handles RS shutdowns (as observed by the
    // splitting process): rename the directory so a rogue RS doesn't create more WALs.
    Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}