Java Code Examples for org.apache.hadoop.hbase.HBaseConfiguration.create()

The following are Java code examples showing how to use create() of the org.apache.hadoop.hbase.HBaseConfiguration class. You can vote up the examples you like; your votes help us surface more good examples.
Example 1
Project: ditb   File: TestIPCUtil.java   Source Code and License Vote up 6 votes
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
/**
 * Entry point for running a few ad-hoc timer tests of the methods herein.
 *
 * @param args optional flags, each prefixed with {@code COUNT} or {@code SIZE}
 * @throws IOException if a timer test fails on I/O
 */
public static void main(String[] args) throws IOException {
  // Defaults used when the corresponding flag is absent.
  int count = 1024;
  int size = 10240;
  for (String argument : args) {
    if (argument.startsWith(COUNT)) {
      count = Integer.parseInt(argument.replace(COUNT, ""));
    } else if (argument.startsWith(SIZE)) {
      size = Integer.parseInt(argument.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger) IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  // Exercise the codec alone, then wrapped in each compression codec.
  timerTests(util, count, size, new KeyValueCodec(), null);
  timerTests(util, count, size, new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size, new KeyValueCodec(), new GzipCodec());
}
 
Example 2
Project: ditb   File: TestWALObserver.java   Source Code and License Vote up 6 votes
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  // All per-test paths hang off the HBase root directory.
  Path root = this.hbaseRootDir;
  this.dir = new Path(root, TestWALObserver.class.getName());
  this.oldLogDir = new Path(root, HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(root,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;

  // Start from a clean root directory for every test method.
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(root)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(root, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Example 3
Project: ditb   File: HTableMultiplexer.java   Source Code and License Vote up 6 votes
/**
 * @param conn The HBase connection.
 * @param conf The HBase configuration
 * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
 *          each region server before dropping the request.
 */
/**
 * @param conn The HBase connection.
 * @param conf The HBase configuration
 * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
 *          each region server before dropping the request.
 */
public HTableMultiplexer(Connection conn, Configuration conf,
    int perRegionServerBufferQueueSize) {
  this.conn = (ClusterConnection) conn;
  this.pool = HTable.getDefaultExecutor(conf);
  // Client retry count from config — presumably consulted when resubmitting
  // buffered puts; confirm against the methods that read retryNum.
  this.retryNum = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
  this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf);
  // Flush interval for buffered puts, in ms (default 100).
  this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100);
  int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10);
  // Daemon worker threads so pending flushes never block JVM shutdown.
  this.executor =
      Executors.newScheduledThreadPool(initThreads,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build());

  // Private copy of the configuration for the flush workers.
  this.workerConf = HBaseConfiguration.create(conf);
  // We do not do the retry because we need to reassign puts to different queues if regions are
  // moved.
  this.workerConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
}
 
Example 4
Project: ditb   File: TestCipherProvider.java   Source Code and License Vote up 5 votes
/**
 * Verifies that the default cipher provider is selected when no custom
 * provider is configured, and that it supports the configured algorithm.
 */
@Test
public void testDefaultProvider() {
  Configuration conf = HBaseConfiguration.create();
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof DefaultCipherProvider);
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains(algorithm));
  Cipher a = Encryption.getCipher(conf, algorithm);
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof DefaultCipherProvider);
  // JUnit convention: expected value first, actual second, so failure
  // messages read correctly ("expected:<AES> but was:<...>").
  assertEquals(algorithm, a.getName());
  assertEquals(AES.KEY_LENGTH, a.getKeyLength());
}
 
Example 5
Project: ditb   File: TestStripeCompactionPolicy.java   Source Code and License Vote up 5 votes
/**
 * With MIN_FILES_L0_KEY set to 3 and exactly 3 L0 files present, the policy
 * should compact the L0 files into the existing stripe boundaries.
 */
@Test
public void testExistingStripesFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 3);
  StripeCompactionPolicy.StripeInformationProvider si = createStripes(3, KEY_A);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
}
 
Example 6
Project: ditb   File: TestStripeCompactionPolicy.java   Source Code and License Vote up 5 votes
@Test
public void testSplitOffStripe() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // First test everything with default split count of 2, then split into more.
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  Long[] toSplit = new Long[] { defaultSplitSize - 2, 1L, 1L };
  Long[] noSplit = new Long[] { defaultSplitSize - 2, 1L };
  // NOTE(review): this is integer division before the cast; if fractional
  // target sizes were intended, the cast is in the wrong place — confirm.
  long splitTargetSize = (long)(defaultSplitSize / defaultSplitCount);
  // Don't split if not eligible for compaction.
  StripeCompactionPolicy.StripeInformationProvider si =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 2L });
  assertNull(createPolicy(conf).selectCompaction(si, al(), false));
  // Make sure everything is eligible.
  conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 500f);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyWholeStripesCompaction(policy, si, 0, 0, null, 2, splitTargetSize);
  // Add some extra stripes...
  si = createStripesWithSizes(0, 0, noSplit, noSplit, toSplit);
  verifyWholeStripesCompaction(policy, si, 2, 2, null, 2, splitTargetSize);
  // In the middle.
  si = createStripesWithSizes(0, 0, noSplit, toSplit, noSplit);
  verifyWholeStripesCompaction(policy, si, 1, 1, null, 2, splitTargetSize);
  // No split-off with different config (larger split size).
  // However, in this case some eligible stripe will just be compacted alone.
  StripeCompactionPolicy specPolicy = createPolicy(
      conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false);
  verifySingleStripeCompaction(specPolicy, si, 1, null);
}
 
Example 7
Project: ditb   File: Export.java   Source Code and License Vote up 5 votes
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  System.exit(job.waitForCompletion(true)? 0 : 1);
}
 
Example 8
Project: ditb   File: TestCipherProvider.java   Source Code and License Vote up 5 votes
/**
 * Verifies that a custom {@code CipherProvider} configured via
 * {@code CRYPTO_CIPHERPROVIDER_CONF_KEY} is loaded and used for lookups.
 */
@Test
public void testCustomProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, MyCipherProvider.class.getName());
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof MyCipherProvider);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains("TEST"));
  Cipher a = Encryption.getCipher(conf, "TEST");
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof MyCipherProvider);
  // JUnit convention: expected value first, actual second, so failure
  // messages read correctly.
  assertEquals("TEST", a.getName());
  assertEquals(0, a.getKeyLength());
}
 
Example 9
Project: ditb   File: TestRegionObserverBypass.java   Source Code and License Vote up 5 votes
/** Registers the test coprocessor on user regions, then boots a mini cluster. */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  // Install TestCoprocessor on every user region before the cluster starts.
  configuration.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(configuration);
  util.startMiniCluster();
}
 
Example 10
Project: ditb   File: TestHeapMemoryManager.java   Source Code and License Vote up 5 votes
/**
 * The heap auto-tuner needs both memstore and block-cache min/max ranges;
 * with only the block-cache range configured it must stay off.
 */
@Test
public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  // Block-cache range is given, but no memstore min/max range.
  configuration.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  configuration.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f);
  configuration.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f);
  HeapMemoryManager heapManager = new HeapMemoryManager(new BlockCacheStub(0),
      new MemstoreFlusherStub(0), new RegionServerStub(configuration),
      new RegionServerAccountingStub());
  assertFalse(heapManager.isTunerOn());
}
 
Example 11
Project: ditb   File: Compressor.java   Source Code and License Vote up 5 votes
/**
 * Copies a WAL file, toggling WAL compression: a compressed input is written
 * uncompressed, and vice versa.
 *
 * @param input  path of the WAL file to read
 * @param output path of the transformed WAL file to write
 * @throws IOException if reading or writing fails
 */
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    // Write with the opposite compression setting of the input file.
    boolean compress = ((ReaderBase) in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e;
    while ((e = in.next()) != null) {
      out.append(e);
    }
  } finally {
    // Close both resources even if in.close() throws; previously a failing
    // in.close() would leak the writer.
    try {
      in.close();
    } finally {
      if (out != null) {
        out.close();
      }
    }
  }
}
 
Example 12
Project: aliyun-tablestore-hbase-client   File: TestSingleColumnValueFilter.java   Source Code and License Vote up 5 votes
// Opens a connection and ensures the configured test table exists before tests run.
// NOTE(review): the Connection is never closed or stored beyond `table` — confirm
// lifecycle is handled elsewhere (e.g. by the test harness) or it leaks.
public TestSingleColumnValueFilter() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        // Give the freshly created table a moment before first use.
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 13
Project: ditb   File: Import.java   Source Code and License Vote up 5 votes
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  String inputVersionString = System.getProperty(ResultSerialization.IMPORT_FORMAT_VER);
  if (inputVersionString != null) {
    conf.set(ResultSerialization.IMPORT_FORMAT_VER, inputVersionString);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  boolean isJobSuccessful = job.waitForCompletion(true);
  if(isJobSuccessful){
    // Flush all the regions of the table
    flushRegionsIfNecessary(conf);
  }
  long inputRecords = job.getCounters().findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  long outputRecords = job.getCounters().findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue();
  if (outputRecords < inputRecords) {
    System.err.println("Warning, not all records were imported (maybe filtered out).");
    if (outputRecords == 0) {
      System.err.println("If the data was exported from HBase 0.94 "+
          "consider using -Dhbase.import.version=0.94.");
    }
  }

  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Example 14
Project: aliyun-tablestore-hbase-client   File: TestToHbaseDelete.java   Source Code and License Vote up 4 votes
// Builds a column mapping for "table-1" from a fresh connection's configuration.
// NOTE(review): the Connection is never closed — confirm the harness manages it.
public TestToHbaseDelete() throws IOException,InterruptedException {
    Configuration config = HBaseConfiguration.create();
    Connection connection = ConnectionFactory.createConnection(config);
    tablestoreColumnMapping = new ColumnMapping("table-1", connection.getConfiguration());
    family = config.get("hbase.client.tablestore.family");
}
 
Example 15
Project: ditb   File: TestBlockCacheReporting.java   Source Code and License Vote up 4 votes
/** Gives each test a fresh configuration and a clean global block cache. */
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create();
  // Drop any global block cache instance left over from a prior test.
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
 
Example 16
Project: ditb   File: TestWALObserver.java   Source Code and License Vote up 4 votes
// Verifies that writing WAL entries with modern (non-HLogKey) keys invokes only
// the new-API WAL observer hooks, while a legacy HLogKey entry still reaches the
// legacy observer — i.e. non-legacy keys don't break legacy coprocessors.
@Test
public void testNonLegacyWALKeysDoNotExplode() throws Exception {
  TableName tableName = TableName.valueOf(TEST_TABLE);
  final HTableDescriptor htd = createBasic3FamilyHTD(Bytes
      .toString(TEST_TABLE));
  final HRegionInfo hri = new HRegionInfo(tableName, null, null);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();

  fs.mkdirs(new Path(FSUtils.getTableDir(hbaseRootDir, tableName), hri.getEncodedName()));

  // NOTE(review): newConf is never used below — confirm it can be removed.
  final Configuration newConf = HBaseConfiguration.create(this.conf);

  final WAL wal = wals.getWAL(UNSPECIFIED_REGION);
  // One observer using the current API, one using the deprecated legacy API.
  final SampleRegionWALObserver newApi = getCoprocessor(wal, SampleRegionWALObserver.class);
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  final SampleRegionWALObserver oldApi = getCoprocessor(wal,
      SampleRegionWALObserver.Legacy.class);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("ensuring wal entries haven't happened before we start");
  assertFalse(newApi.isPreWALWriteCalled());
  assertFalse(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("writing to WAL with non-legacy keys.");
  final int countPerFamily = 5;
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        EnvironmentEdgeManager.getDelegate(), wal, htd, mvcc);
  }

  LOG.debug("Verify that only the non-legacy CP saw edits.");
  assertTrue(newApi.isPreWALWriteCalled());
  assertTrue(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  // wish we could test that the log message happened :/
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("reseting cp state.");
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("write a log edit that supports legacy cps.");
  final long now = EnvironmentEdgeManager.currentTime();
  // HLogKey is the legacy key type, so the legacy observer should see this one.
  final WALKey legacyKey = new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), now);
  final WALEdit edit = new WALEdit();
  final byte[] nonce = Bytes.toBytes("1772");
  edit.add(new KeyValue(TEST_ROW, TEST_FAMILY[0], nonce, now, nonce));
  final long txid = wal.append(htd, hri, legacyKey, edit, true);
  wal.sync(txid);

  LOG.debug("Make sure legacy cps can see supported edits after having been skipped.");
  assertTrue("non-legacy WALObserver didn't see pre-write.", newApi.isPreWALWriteCalled());
  assertTrue("non-legacy WALObserver didn't see post-write.", newApi.isPostWALWriteCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy pre-write.",
      newApi.isPreWALWriteDeprecatedCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy post-write.",
      newApi.isPostWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see pre-write.", oldApi.isPreWALWriteCalled());
  assertTrue("legacy WALObserver didn't see post-write.", oldApi.isPostWALWriteCalled());
  assertTrue("legacy WALObserver didn't see legacy pre-write.",
      oldApi.isPreWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see legacy post-write.",
      oldApi.isPostWALWriteDeprecatedCalled());
}
 
Example 17
Project: ditb   File: TestHRegion.java   Source Code and License Vote up 4 votes
/**
 * Puts one row into a region created with the given table-level durability,
 * using a mutation with the given mutation-level durability, then verifies
 * (via a spied WAL) whether append and sync were invoked as expected.
 *
 * @param method base name used for the table and log directory
 * @param tableDurability durability configured on the table/region
 * @param mutationDurability durability set on the Put
 * @param timeout how long to wait for an asynchronous sync, in ms
 * @param expectAppend whether a WAL append should have happened
 * @param expectSync whether HRegion itself should call sync(txid)
 * @param expectSyncFromLogSyncer whether the background log syncer should call sync()
 */
private void durabilityTest(String method, Durability tableDurability,
    Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync,
    final boolean expectSyncFromLogSyncer) throws Exception {
  Configuration conf = HBaseConfiguration.create(CONF);
  // Make the table name unique per durability combination.
  method = method + "_" + tableDurability.name() + "_" + mutationDurability.name();
  TableName tableName = TableName.valueOf(method);
  byte[] family = Bytes.toBytes("family");
  Path logDir = new Path(new Path(dir + method), "log");
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, logDir);
  final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
  // Spy the WAL so append/sync invocations can be verified below.
  final WAL wal = spy(wals.getWAL(tableName.getName()));
  this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
      HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, wal,
      new byte[][] { family });

  Put put = new Put(Bytes.toBytes("r1"));
  put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
  put.setDurability(mutationDurability);
  region.put(put);

  //verify append called or not
  verify(wal, expectAppend ? times(1) : never())
    .append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(),
        (WALEdit)any(), Mockito.anyBoolean());

  // verify sync called or not
  if (expectSync || expectSyncFromLogSyncer) {
    // Sync may happen asynchronously; poll until it is observed or we time out.
    TEST_UTIL.waitFor(timeout, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        try {
          if (expectSync) {
            verify(wal, times(1)).sync(anyLong()); // Hregion calls this one
          } else if (expectSyncFromLogSyncer) {
            verify(wal, times(1)).sync(); // wal syncer calls this one
          }
        } catch (Throwable ignore) {
        }
        // NOTE(review): always returns true, so the waitFor exits on the first
        // evaluation regardless of verification outcome — confirm intended.
        return true;
      }
    });
  } else {
    //verify(wal, never()).sync(anyLong());
    verify(wal, never()).sync();
  }

  HRegion.closeHRegion(this.region);
  this.region = null;
}
 
Example 18
Project: ditb   File: TestSimpleRpcScheduler.java   Source Code and License Vote up 4 votes
/**
 * Dispatches a fixed pattern of small/large/huge mock RPC calls to a
 * SimpleRpcScheduler configured with the given queue type and checks the
 * resulting completion order via a weighted total-time metric: a deadline
 * queue reorders by deadline (530), a FIFO queue preserves arrival order (930).
 *
 * @param queueType value for CALL_QUEUE_TYPE_CONF_KEY (e.g. fifo or deadline)
 */
private void testRpcScheduler(final String queueType) throws Exception {
  Configuration schedConf = HBaseConfiguration.create();
  schedConf.set(SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY, queueType);

  PriorityFunction priority = mock(PriorityFunction.class);
  when(priority.getPriority(any(RequestHeader.class),
    any(Message.class), any(User.class)))
    .thenReturn(HConstants.NORMAL_QOS);

  // Single handler thread so calls execute strictly in queue order.
  RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 1, 1, 1, priority,
                                                  HConstants.QOS_THRESHOLD);
  try {
    scheduler.start();

    // Three mock calls distinguished only by call id / deadline / "weight".
    CallRunner smallCallTask = mock(CallRunner.class);
    RpcServer.Call smallCall = mock(RpcServer.Call.class);
    RequestHeader smallHead = RequestHeader.newBuilder().setCallId(1).build();
    when(smallCallTask.getCall()).thenReturn(smallCall);
    when(smallCall.getHeader()).thenReturn(smallHead);

    CallRunner largeCallTask = mock(CallRunner.class);
    RpcServer.Call largeCall = mock(RpcServer.Call.class);
    RequestHeader largeHead = RequestHeader.newBuilder().setCallId(50).build();
    when(largeCallTask.getCall()).thenReturn(largeCall);
    when(largeCall.getHeader()).thenReturn(largeHead);

    CallRunner hugeCallTask = mock(CallRunner.class);
    RpcServer.Call hugeCall = mock(RpcServer.Call.class);
    RequestHeader hugeHead = RequestHeader.newBuilder().setCallId(100).build();
    when(hugeCallTask.getCall()).thenReturn(hugeCall);
    when(hugeCall.getHeader()).thenReturn(hugeHead);

    // Deadlines drive ordering in the deadline queue only.
    when(priority.getDeadline(eq(smallHead), any(Message.class))).thenReturn(0L);
    when(priority.getDeadline(eq(largeHead), any(Message.class))).thenReturn(50L);
    when(priority.getDeadline(eq(hugeHead), any(Message.class))).thenReturn(100L);

    // Each executed task appends its weight to `work` after a 250ms-ish delay.
    final ArrayList<Integer> work = new ArrayList<Integer>();
    doAnswerTaskExecution(smallCallTask, work, 10, 250);
    doAnswerTaskExecution(largeCallTask, work, 50, 250);
    doAnswerTaskExecution(hugeCallTask, work, 100, 250);

    scheduler.dispatch(smallCallTask);
    scheduler.dispatch(smallCallTask);
    scheduler.dispatch(smallCallTask);
    scheduler.dispatch(hugeCallTask);
    scheduler.dispatch(smallCallTask);
    scheduler.dispatch(largeCallTask);
    scheduler.dispatch(smallCallTask);
    scheduler.dispatch(smallCallTask);

    // Wait until all 8 dispatched calls have executed.
    while (work.size() < 8) {
      Threads.sleepWithoutInterrupt(100);
    }

    // totalTime is the sum of running prefix sums of weights, so earlier
    // heavy calls inflate it — a proxy for average completion time.
    int seqSum = 0;
    int totalTime = 0;
    for (int i = 0; i < work.size(); ++i) {
      LOG.debug("Request i=" + i + " value=" + work.get(i));
      seqSum += work.get(i);
      totalTime += seqSum;
    }
    LOG.debug("Total Time: " + totalTime);

    // -> [small small small huge small large small small]
    // -> NO REORDER   [10 10 10 100 10 50 10 10] -> 930 (FIFO Queue)
    // -> WITH REORDER [10 10 10 10 10 10 50 100] -> 530 (Deadline Queue)
    if (queueType.equals(SimpleRpcScheduler.CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
      assertEquals(530, totalTime);
    } else /* if (queueType.equals(SimpleRpcScheduler.CALL_QUEUE_TYPE_FIFO_CONF_VALUE)) */ {
      assertEquals(930, totalTime);
    }
  } finally {
    scheduler.stop();
  }
}
 
Example 19
Project: ditb   File: TestMasterProcedureScheduler.java   Source Code and License Vote up 4 votes
/** Builds a fresh scheduler (with a no-op table lock manager) for each test. */
@Before
public void setUp() throws IOException {
  this.conf = HBaseConfiguration.create();
  this.queue = new MasterProcedureScheduler(this.conf,
      new TableLockManager.NullTableLockManager());
}
 
Example 20
Project: ditb   File: TestPerColumnFamilyFlush.java   Source Code and License Vote up 4 votes
// With FlushAllStoresPolicy configured, a flush triggered while only CF1 is
// over the threshold must still flush all three column families completely.
@Test(timeout = 180000)
public void testSelectiveFlushWhenNotEnabled() throws IOException {
  // Set up the configuration
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
  conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllStoresPolicy.class.getName());

  // Initialize the HRegion
  initHRegion("testSelectiveFlushWhenNotEnabled", conf);
  // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
  for (int i = 1; i <= 1200; i++) {
    region.put(createPut(1, i));
    if (i <= 100) {
      region.put(createPut(2, i));
      if (i <= 50) {
        region.put(createPut(3, i));
      }
    }
  }

  long totalMemstoreSize = region.getMemstoreSize();

  // Find the sizes of the memstores of each CF.
  long cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
  long cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
  long cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();

  // Some other sanity checks.
  assertTrue(cf1MemstoreSize > 0);
  assertTrue(cf2MemstoreSize > 0);
  assertTrue(cf3MemstoreSize > 0);

  // The total memstore size should be the same as the sum of the sizes of
  // memstores of CF1, CF2 and CF3. Per-store sizes each include a fixed
  // DEEP_OVERHEAD that the region total does not, hence the adjustment.
  assertEquals(totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD, cf1MemstoreSize
      + cf2MemstoreSize + cf3MemstoreSize);

  // Flush!
  region.flush(false);

  cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
  cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
  cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
  totalMemstoreSize = region.getMemstoreSize();
  long smallestSeqInRegionCurrentMemstore = ((HRegion)region).getWAL()
      .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());

  // Everything should have been cleared: each store is back to its fixed
  // overhead, the region total is zero, and no memstore seqnum remains.
  assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf1MemstoreSize);
  assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSize);
  assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf3MemstoreSize);
  assertEquals(0, totalMemstoreSize);
  assertEquals(HConstants.NO_SEQNUM, smallestSeqInRegionCurrentMemstore);
}