Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#getDataTestDir()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#getDataTestDir(). You can go to the original project or source file by following the links above each example.
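
Before the project examples, here is a minimal, self-contained sketch of how the method behaves (the class name GetDataTestDirSketch and the subdirectory name "scratch" are illustrative assumptions, not taken from any project below): getDataTestDir() returns a unique directory under the local test data root, and getDataTestDir(String) returns a named subdirectory beneath it.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class GetDataTestDirSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    // Without an argument: a unique directory under the local test data root.
    Path base = htu.getDataTestDir();
    // With an argument: <base>/scratch.
    Path scratch = htu.getDataTestDir("scratch");
    FileSystem fs = htu.getTestFileSystem();
    fs.mkdirs(scratch);
    System.out.println("base=" + base + ", scratch=" + scratch);
    // Delete the data test directory and everything under it.
    htu.cleanupTestDir();
  }
}
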
Example 1
Source File: TestMemStoreSegmentsIterator.java    From hbase with Apache License 2.0
@Before
public void setup() throws IOException {
  Configuration conf = new Configuration();
  HBaseTestingUtility hbaseUtility = new HBaseTestingUtility(conf);
  TableDescriptorBuilder tableDescriptorBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE));
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY)).build();
  tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);

  RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE)).build();
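  // getDataTestDir(subdir) returns a unique local path <data-test-dir>/<subdir>;
  // ROOT_SUB_PATH is a constant defined in this test class.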
  Path rootPath = hbaseUtility.getDataTestDir(ROOT_SUB_PATH);
  this.wal = HBaseTestingUtility.createWal(conf, rootPath, info);
  this.region = HRegion.createHRegion(info, rootPath, conf,
    tableDescriptorBuilder.build(), this.wal, true);
  this.store = new HStore(this.region, columnFamilyDescriptor, conf, false);
  this.comparator = CellComparator.getInstance();
  this.compactionKVMax = HConstants.COMPACTION_KV_MAX_DEFAULT;
}
 
Example 2
Source File: TestCatalogJanitor.java    From hbase with Apache License 2.0
private String setRootDirAndCleanIt(final HBaseTestingUtility htu, final String subdir)
  throws IOException {
  Path testdir = htu.getDataTestDir(subdir);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
  CommonFSUtils.setRootDir(htu.getConfiguration(), testdir);
  return CommonFSUtils.getRootDir(htu.getConfiguration()).toString();
}
 
Example 3
Source File: TestSnapshotManifest.java    From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();

  rootDir = TEST_UTIL.getDataTestDir(TABLE_NAME_STR);
  fs = TEST_UTIL.getTestFileSystem();
  conf = TEST_UTIL.getConfiguration();

  SnapshotTestingUtils.SnapshotMock snapshotMock =
    new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir);
  builder = snapshotMock.createSnapshotV2("snapshot", TABLE_NAME_STR, 0);
  snapshotDir = builder.commit();
  snapshotDesc = builder.getSnapshotDescription();
}
 
Example 4
Source File: TestDefaultMemStore.java    From hbase with Apache License 2.0
@Test
public void testShouldFlushMeta() throws Exception {
  // write an edit in the META and ensure the shouldFlush (that the periodic memstore
  // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though
  // the MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value)
  Configuration conf = new Configuration();
  conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
  HBaseTestingUtility hbaseUtility = new HBaseTestingUtility(conf);
  Path testDir = hbaseUtility.getDataTestDir();
  EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setCurrentTimeMillis(1234);
  WALFactory wFactory = new WALFactory(conf, "1234");
  TableDescriptors tds = new FSTableDescriptors(conf);
  FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
  HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
      conf, tds.get(TableName.META_TABLE_NAME),
      wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
  // Parameterized tests add a [#] suffix; replace '[' and ']' to keep the table name valid.
  TableDescriptor desc = TableDescriptorBuilder
      .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("foo")).build();
  RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName())
      .setStartKey(Bytes.toBytes("row_0200")).setEndKey(Bytes.toBytes("row_0300")).build();
  HRegion r = HRegion.createHRegion(hri, testDir, conf, desc, wFactory.getWAL(hri));
  addRegionToMETA(meta, r);
  edge.setCurrentTimeMillis(1234 + 100);
  StringBuilder sb = new StringBuilder();
  assertFalse(meta.shouldFlush(sb));
  edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
  assertTrue(meta.shouldFlush(sb));
}
 
Example 5
Source File: TestRegionInfo.java    From hbase with Apache License 2.0
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  // Create a region.  That'll write the .regioninfo file.
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration());
  HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
      fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HBaseTestingUtility.closeRegionAndWAL(r);
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
      null, htu.getConfiguration());
  // Ensure the file is not written for a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  org.apache.hadoop.hbase.client.RegionInfo deserializedHri =
    HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  assertEquals(0,
    org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri));
  HBaseTestingUtility.closeRegionAndWAL(r);
}
 
Example 6
Source File: TestPriorityRpc.java    From hbase with Apache License 2.0
@Before
public void setup() {
  conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.testing.nocluster", true); // No need to do ZK
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
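  // Return value unused: the call's side effect creates the local data test
  // directory before the region server below is constructed.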
  TEST_UTIL.getDataTestDir(this.getClass().getName());
  regionServer = HRegionServer.constructRegionServer(HRegionServer.class, conf);
  priority = regionServer.rpcServices.getPriority();
}
 
Example 7
Source File: TestCacheConfig.java    From hbase with Apache License 2.0
@Test
public void testFileBucketCacheConfig() throws IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
  try {
    Path p = new Path(htu.getDataTestDir(), "bc.txt");
    FileSystem fs = FileSystem.get(this.conf);
    fs.create(p).close();
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
    doBucketCacheConfigTest();
  } finally {
    htu.cleanupTestDir();
  }
}
 
Example 8
Source File: TestVerifyBucketCacheFile.java    From hbase with Apache License 2.0
/**
 * Test whether BucketCache starts normally after the cache file has been modified.
 * Start BucketCache and add some blocks, then shut down BucketCache and persist the cache to file.
 * Restart BucketCache after modifying the cache file's data; it cannot restore the cache from the
 * file, so the cache file and the persistence file are deleted before BucketCache starts normally.
 * @throws Exception the exception
 */
@Test
public void testModifiedBucketCacheFileData() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Path testDir = TEST_UTIL.getDataTestDir();
  TEST_UTIL.getTestFileSystem().mkdirs(testDir);

  BucketCache bucketCache =
    new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize,
      constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence");
  long usedSize = bucketCache.getAllocator().getUsedSize();
  assertEquals(0, usedSize);

  CacheTestUtils.HFileBlockPair[] blocks =
    CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1);
  // Add blocks
  for (CacheTestUtils.HFileBlockPair block : blocks) {
    cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock());
  }
  usedSize = bucketCache.getAllocator().getUsedSize();
  assertNotEquals(0, usedSize);
  // persist cache to file
  bucketCache.shutdown();

  // modified bucket cache file
  String file = testDir + "/bucket.cache";
  try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
    new FileOutputStream(file, false)))) {
    out.write("test bucket cache");
  }
  // can't restore cache from file
  bucketCache =
    new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize,
      constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence");
  assertEquals(0, bucketCache.getAllocator().getUsedSize());
  assertEquals(0, bucketCache.backingMap.size());

  TEST_UTIL.cleanupTestDir();
}
 
Example 9
Source File: TestExportSnapshot.java    From hbase with Apache License 2.0
static Path getLocalDestinationDir(HBaseTestingUtility htu) {
  Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis());
  try {
    FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
    LOG.info("Local export destination path: " + path);
    return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example 10
Source File: TestStoreFileRefresherChore.java    From hbase with Apache License 2.0
@Before
public void setUp() throws IOException {
  TEST_UTIL = new HBaseTestingUtility();
  testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore");
  CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
}
 
Example 11
Source File: TestResettingCounters.java    From hbase with Apache License 2.0
@Test
public void testResettingCounters() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte [] table = Bytes.toBytes(name.getMethodName());
  byte [][] families = new byte [][] {
      Bytes.toBytes("family1"),
      Bytes.toBytes("family2"),
      Bytes.toBytes("family3")
  };
  int numQualifiers = 10;
  byte [][] qualifiers = new byte [numQualifiers][];
  for (int i=0; i<numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
  int numRows = 10;
  byte [][] rows = new byte [numRows][];
  for (int i=0; i<numRows; i++) rows[i] = Bytes.toBytes("r" + i);

  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(table));
  for (byte[] family : families) {
    tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  }

  RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, tableDescriptor);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i=0;i<numQualifiers;i++) {
      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
      else odd.addColumn(families[0], qualifiers[i], 1);
      all.addColumn(families[0], qualifiers[i], 1);
    }

    // increment odd qualifiers 5 times and flush
    for (int i=0;i<5;i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    region.flush(true);

    // increment even qualifiers 5 times
    for (int i=0;i<5;i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);

    // increment all qualifiers, should have value=6 for all
    Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
    assertEquals(numQualifiers, result.size());
    Cell[] kvs = result.rawCells();
    for (int i=0;i<kvs.length;i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
 
Example 12
Source File: TestCompactionArchiveConcurrentClose.java    From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  testDir = testUtil.getDataTestDir("TestStoreFileRefresherChore");
  CommonFSUtils.setRootDir(testUtil.getConfiguration(), testDir);
}
 
Example 13
Source File: TestBlocksScanned.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  testDir = TEST_UTIL.getDataTestDir("TestBlocksScanned");
}
 
Example 14
Source File: TestHFileOutputFormat2.java    From hbase with Apache License 2.0
@Test
public void testBlockStoragePolicy() throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");

  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX +
          Bytes.toString(HFileOutputFormat2.combineTableNameSuffix(
                  TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD");
  Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
  Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));
  util.startMiniDFSCluster(3);
  FileSystem fs = util.getDFSCluster().getFileSystem();
  try {
    fs.mkdirs(cf1Dir);
    fs.mkdirs(cf2Dir);

    // the original block storage policy would be HOT
    String spA = getStoragePolicyName(fs, cf1Dir);
    String spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);

    // alter table cf schema to change storage policies
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
            HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir);
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
            HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir);
    spA = getStoragePolicyName(fs, cf1Dir);
    spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ONE_SSD", spA);
    assertNotNull(spB);
    assertEquals("ALL_SSD", spB);
  } finally {
    fs.delete(cf1Dir, true);
    fs.delete(cf2Dir, true);
    util.shutdownMiniDFSCluster();
  }
}