Java Code Examples for org.apache.hadoop.hbase.HBaseConfiguration#create()

The following examples show how to use org.apache.hadoop.hbase.HBaseConfiguration#create(). Each example is taken from an open source project; the source file, project, and license are listed above the example.
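Before the project examples, here is a minimal standalone sketch of the usual pattern: HBaseConfiguration.create() builds a Configuration pre-loaded with hbase-default.xml and hbase-site.xml from the classpath, which can then be overridden programmatically and handed to a connection factory. The quorum and table names below are placeholders, and the connection code assumes the current HBase client API rather than any particular project shown here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseConfigurationCreateSketch {
  public static void main(String[] args) throws IOException {
    // Loads hbase-default.xml and hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Programmatic overrides take precedence over the XML resources.
    conf.set("hbase.zookeeper.quorum", "zk-host"); // placeholder quorum
    // try-with-resources closes the connection and table when done.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) { // placeholder table
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("Cells in row1: " + result.size());
    }
  }
}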
Example 1
Source File: TestHbaseClient.java    From Kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
    foo(6, 5);
    foo(5, 2);
    foo(3, 0);

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "hbase_host");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    HTable table = new HTable(conf, "test1");
    Put put = new Put(Bytes.toBytes("row1"));

    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));

    table.put(put);
    table.close();
}
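Example 1 uses the pre-1.0 HTable constructor and the deprecated three-argument Put#add(family, qualifier, value). A hedged equivalent using the Connection/Table API is sketched below; it reuses the imports from the sketch near the top of the page (plus org.apache.hadoop.hbase.client.Put) and keeps the placeholder host, table, and column names from Example 1.

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "hbase_host");
conf.set("zookeeper.znode.parent", "/hbase-unsecure");

try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("test1"))) {
  Put put = new Put(Bytes.toBytes("row1"));
  // addColumn replaces the deprecated Put#add(family, qualifier, value).
  put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
  table.put(put);
}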
 
Example 2
Source File: PersistentBloomTrackerCreator.java    From metron with Apache License 2.0
@Override
public AccessTracker create(Map<String, Object> config, TableProvider provider) throws IOException {
  Config patConfig = new Config(config);
  String hbaseTable = patConfig.getHBaseTable();
  int expectedInsertions = patConfig.getExpectedInsertions();
  double falsePositives = patConfig.getFalsePositiveRate();
  long millisecondsBetweenPersist = patConfig.getMillisecondsBetweenPersists();
  BloomAccessTracker bat = new BloomAccessTracker(hbaseTable, expectedInsertions, falsePositives);
  Configuration hbaseConfig = HBaseConfiguration.create();

  AccessTracker ret = new PersistentAccessTracker( hbaseTable
                                                 , UUID.randomUUID().toString()
                                                 , provider.getTable(hbaseConfig, hbaseTable)
                                                 , patConfig.getHBaseCF()
                                                 , bat
                                                 , millisecondsBetweenPersist
                                                 );
  return ret;
}
 
Example 3
Source File: AbstractTestIPC.java    From hbase with Apache License 2.0
/**
 * Ensure we do not HAVE TO HAVE a codec.
 */
@Test
public void testNoCodec() throws IOException, ServiceException {
  Configuration conf = HBaseConfiguration.create();
  RpcServer rpcServer = createRpcServer(null, "testRpcServer",
      Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(
          SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
      new FifoRpcScheduler(CONF, 1));
  try (AbstractRpcClient<?> client = createRpcClientNoCodec(conf)) {
    rpcServer.start();
    BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
    HBaseRpcController pcrc = new HBaseRpcControllerImpl();
    String message = "hello";
    assertEquals(message,
      stub.echo(pcrc, EchoRequestProto.newBuilder().setMessage(message).build()).getMessage());
    assertNull(pcrc.cellScanner());
  } finally {
    rpcServer.stop();
  }
}
 
Example 4
Source File: InvalidListPruneTest.java    From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void startMiniCluster() throws Exception {
  // Setup the configuration to start HBase cluster with the invalid list pruning enabled
  conf = HBaseConfiguration.create();
  conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
  // Flush prune data to the table quickly, so that tests don't have to wait long to see updates
  conf.setLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, 0L);
  AbstractHBaseTableTest.startMiniCluster();

  TransactionStateStorage txStateStorage = new InMemoryTransactionStateStorage();
  TransactionManager txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();

  // Do some transactional data operations
  txDataTable1 = TableName.valueOf("invalidListPruneTestTable1");
  Table hTable = createTable(txDataTable1.getName(), new byte[][]{family}, false,
                              Collections.singletonList(TestTransactionProcessor.class.getName()));
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, TxConstants.ConflictDetection.ROW)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    for (int i = 0; i < MAX_ROWS; ++i) {
      txTable.put(new Put(Bytes.toBytes(i)).addColumn(family, qualifier, Bytes.toBytes(i)));
    }
    txContext.finish();
  }

  testUtil.flush(txDataTable1);
  txManager.stopAndWait();

  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public Table get() throws IOException {
        return testUtil.getConnection().getTable(pruneStateTable);
      }
    });

}
 
Example 5
Source File: IntegrationTestImportTsv.java    From hbase with Apache License 2.0
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  int status = ToolRunner.run(conf, new IntegrationTestImportTsv(), args);
  System.exit(status);
}
 
Example 6
Source File: TestHStore.java    From hbase with Apache License 2.0
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
  final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
  long anyValue = 10;

  // We'll check that it uses correct config and propagates it appropriately by going thru
  // the simplest "real" path I can find - "throttleCompaction", which just checks whether
  // a number we pass in is higher than some config value, inside compactionPolicy.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(CONFIG_KEY, anyValue);
  init(name.getMethodName() + "-xml", conf);
  assertTrue(store.throttleCompaction(anyValue + 1));
  assertFalse(store.throttleCompaction(anyValue));

  // HTD overrides XML.
  --anyValue;
  init(name.getMethodName() + "-htd", conf, TableDescriptorBuilder
      .newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY, Long.toString(anyValue)),
    ColumnFamilyDescriptorBuilder.of(family));
  assertTrue(store.throttleCompaction(anyValue + 1));
  assertFalse(store.throttleCompaction(anyValue));

  // HCD overrides them both.
  --anyValue;
  init(name.getMethodName() + "-hcd", conf,
    TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY,
      Long.toString(anyValue)),
    ColumnFamilyDescriptorBuilder.newBuilder(family).setValue(CONFIG_KEY, Long.toString(anyValue))
        .build());
  assertTrue(store.throttleCompaction(anyValue + 1));
  assertFalse(store.throttleCompaction(anyValue));
}
 
Example 7
Source File: PreUpgradeValidator.java    From hbase with Apache License 2.0
public static void main(String[] args) {
  int ret;

  Configuration conf = HBaseConfiguration.create();

  try {
    ret = ToolRunner.run(conf, new PreUpgradeValidator(), args);
  } catch (Exception e) {
    LOG.error("Error running command-line tool", e);
    ret = AbstractHBaseTool.EXIT_FAILURE;
  }

  System.exit(ret);
}
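Examples 5 and 7 share the same pattern: a fresh HBaseConfiguration.create() is handed to Hadoop's ToolRunner, which applies generic options (such as -D overrides and -conf files) before invoking the tool. A minimal sketch of that pattern, assuming only the standard Hadoop Tool/ToolRunner API, is shown below; MyHBaseTool and its empty run() body are placeholders, not part of either project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MyHBaseTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    // getConf() returns the HBase-aware Configuration seeded in main(),
    // with any generic command-line options already applied by ToolRunner.
    Configuration conf = getConf();
    // ... placeholder for the actual work ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // Seed ToolRunner with an HBase configuration so hbase-site.xml settings are picked up.
    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new MyHBaseTool(), args);
    System.exit(exitCode);
  }
}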
 
Example 8
Source File: TaxiiIntegrationTest.java    From metron with Apache License 2.0
@Test
public void testCommandLine() throws Exception {
    Configuration conf = HBaseConfiguration.create();

    String[] argv = {"-c connection.json", "-e extractor.json", "-n enrichment_config.json", "-l log4j", "-p 10", "-b 04/14/2016 12:00:00"};
    String[] otherArgs = new GenericOptionsParser(conf, argv).getRemainingArgs();

    CommandLine cli = TaxiiLoader.TaxiiOptions.parse(new PosixParser(), otherArgs);
    assertEquals(extractorJson,TaxiiLoader.TaxiiOptions.EXTRACTOR_CONFIG.get(cli).trim());
    assertEquals(connectionConfig, TaxiiLoader.TaxiiOptions.CONNECTION_CONFIG.get(cli).trim());
    assertEquals(beginTime,TaxiiLoader.TaxiiOptions.BEGIN_TIME.get(cli).trim());
    assertEquals(enrichmentJson,TaxiiLoader.TaxiiOptions.ENRICHMENT_CONFIG.get(cli).trim());
    assertEquals(timeInteval,TaxiiLoader.TaxiiOptions.TIME_BETWEEN_POLLS.get(cli).trim());
    assertEquals(log4jProperty, TaxiiLoader.TaxiiOptions.LOG4J_PROPERTIES.get(cli).trim());
}
 
Example 9
Source File: TestHMobStore.java    From hbase with Apache License 2.0
@Test
public void testCommitFile() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  init(name.getMethodName(), conf, true);
  String targetPathName = MobUtils.formatDate(new Date());
  Path targetPath = new Path(store.getPath(), (targetPathName
      + Path.SEPARATOR + mobFilePath.getName()));
  fs.delete(targetPath, true);
  Assert.assertFalse(fs.exists(targetPath));
  //commit file
  store.commitFile(mobFilePath, targetPath);
  Assert.assertTrue(fs.exists(targetPath));
}
 
Example 10
Source File: InvalidListPruneTest.java    From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void startMiniCluster() throws Exception {
  // Setup the configuration to start HBase cluster with the invalid list pruning enabled
  conf = HBaseConfiguration.create();
  conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
  // Flush prune data to the table quickly, so that tests don't have to wait long to see updates
  conf.setLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, 0L);
  AbstractHBaseTableTest.startMiniCluster();

  TransactionStateStorage txStateStorage = new InMemoryTransactionStateStorage();
  TransactionManager txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();

  // Do some transactional data operations
  txDataTable1 = TableName.valueOf("invalidListPruneTestTable1");
  HTable hTable = createTable(txDataTable1.getName(), new byte[][]{family}, false,
                              Collections.singletonList(TestTransactionProcessor.class.getName()));
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, TxConstants.ConflictDetection.ROW)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    for (int i = 0; i < MAX_ROWS; ++i) {
      txTable.put(new Put(Bytes.toBytes(i)).addColumn(family, qualifier, Bytes.toBytes(i)));
    }
    txContext.finish();
  }

  testUtil.flush(txDataTable1);
  txManager.stopAndWait();

  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public Table get() throws IOException {
        return testUtil.getConnection().getTable(pruneStateTable);
      }
    });

}
 
Example 11
Source File: TableBuilder.java    From learning-hadoop with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();

  byte[] columnFamily = Bytes.toBytes("f");

  String tableName = "t";

  try {
    ZKUtil.applyClusterKeyToConf(conf, "edh1:2181:/hbase");
    HBaseAdmin hba = new HBaseAdmin(conf);
    if (hba.tableExists(tableName)) {
      hba.disableTable(tableName);
      hba.deleteTable(tableName);
    }
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily);
    columnDescriptor.setMaxVersions(1);
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    tableDescriptor.addFamily(columnDescriptor);
    hba.createTable(tableDescriptor);
    hba.close();
  } catch (IOException e) {
    e.printStackTrace();
  }

}
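Example 11 relies on HBaseAdmin, HTableDescriptor, and HColumnDescriptor, which are deprecated in current HBase releases, and on ZKUtil.applyClusterKeyToConf to apply the cluster key "edh1:2181:/hbase". The fragment below is a hedged rewrite against the builder-based admin API (Admin, TableDescriptorBuilder, ColumnFamilyDescriptorBuilder); it sets the three configuration properties equivalent to the cluster key directly, keeps the placeholder host, table, and family names from the example, and assumes imports from org.apache.hadoop.hbase.client plus org.apache.hadoop.hbase.regionserver.BloomType.

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "edh1");
conf.set("hbase.zookeeper.property.clientPort", "2181");
conf.set("zookeeper.znode.parent", "/hbase");

TableName tableName = TableName.valueOf("t");
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
      .setMaxVersions(1)
      .setBloomFilterType(BloomType.ROW)
      .build();
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(family)
      .build();
  admin.createTable(tableDescriptor);
} catch (IOException e) {
  e.printStackTrace();
}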
 
Example 12
Source File: TableInputFormat.java    From stratosphere with Apache License 2.0
/**
 * Create an {@link HTable} instance and set it into this format.
 * 
 * @param parameters
 *        a {@link Configuration} that holds at least the table name.
 */
protected HTable createTable(Configuration parameters) {
	String configLocation = parameters.getString(TableInputFormat.CONFIG_LOCATION, null);
	LOG.info("Got config location: " + configLocation);
	if (configLocation != null)
	{
		org.apache.hadoop.conf.Configuration dummyConf = new org.apache.hadoop.conf.Configuration();
		if(OperatingSystem.isWindows()) {
			dummyConf.addResource(new Path("file:/" + configLocation));
		} else {
			dummyConf.addResource(new Path("file://" + configLocation));
		}
		hConf = HBaseConfiguration.create(dummyConf);
		// hConf.set("hbase.master", "im1a5.internetmemory.org");
		LOG.info("hbase master: " + hConf.get("hbase.master"));
		LOG.info("zookeeper quorum: " + hConf.get("hbase.zookeeper.quorum"));

	}
	String tableName = parameters.getString(INPUT_TABLE, "");
	try {
		return new HTable(this.hConf, tableName);
	} catch (Exception e) {
		LOG.error(StringUtils.stringifyException(e));
	}
	return null;
}
 
Example 13
Source File: TestNettyRpcConnection.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUp() throws IOException {
  CLIENT = new NettyRpcClient(HBaseConfiguration.create());
  CONN = new NettyRpcConnection(CLIENT,
    new ConnectionId(User.getCurrent(), "test", new InetSocketAddress("localhost", 1234)));
}
 
Example 14
Source File: HBaseAccessorWithFilter.java    From pxf with Apache License 2.0
private void openTable() throws IOException {
    table = new HTable(HBaseConfiguration.create(configuration), context.getDataSource().getBytes());
}
 
Example 15
Source File: HBaseServerTestInstance.java    From Halyard with Apache License 2.0
public static synchronized Configuration getInstanceConfig() throws Exception {
    if (conf == null) {
        File zooRoot = File.createTempFile("hbase-zookeeper", "");
        zooRoot.delete();
        ZooKeeperServer zookeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
        ServerCnxnFactory factory = ServerCnxnFactory.createFactory(new InetSocketAddress("localhost", 0), 5000);
        factory.startup(zookeper);

        YarnConfiguration yconf = new YarnConfiguration();
        String argLine = System.getProperty("argLine");
        if (argLine != null) {
            yconf.set("yarn.app.mapreduce.am.command-opts", argLine.replace("jacoco.exec", "jacocoMR.exec"));
        }
        yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
        yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
        MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
        miniCluster.init(yconf);
        String resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
        miniCluster.start();
        miniCluster.waitForNodeManagersToConnect(10000);
        // following condition set in MiniYarnCluster:273
        while (resourceManagerLink.endsWith(":0")) {
            Thread.sleep(100);
            resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        }

        File hbaseRoot = File.createTempFile("hbase-root", "");
        hbaseRoot.delete();
        conf = HBaseConfiguration.create(miniCluster.getConfig());
        conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
        conf.set("hbase.master.hostname", "localhost");
        conf.set("hbase.regionserver.hostname", "localhost");
        conf.setInt("hbase.master.info.port", -1);
        conf.set("hbase.fs.tmp.dir", new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());
        LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
        cluster.startup();
    }
    return new Configuration(conf);
}
 
Example 16
Source File: HBase10ConfigurationProvider.java    From phoenix-tephra with Apache License 2.0
@Override
public Configuration get() {
  return HBaseConfiguration.create();
}
 
Example 17
Source File: TestWALEntrySinkFilter.java    From hbase with Apache License 2.0
/**
 * Test filter. Filter will filter out any write time that is <= 5 (BOUNDARY). We count how many
 * items we filter out and we count how many cells make it through for distribution way down below
 * in the Table#batch implementation. Puts in place a custom DevNullConnection so we can insert
 * our counting Table.
 * @throws IOException
 */
@Test
public void testWALEntryFilter() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  // Make it so our filter is instantiated on construction of ReplicationSink.
  conf.setClass(DummyConnectionRegistry.REGISTRY_IMPL_CONF_KEY, DevNullConnectionRegistry.class,
      DummyConnectionRegistry.class);
  conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY,
      IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
  conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL,
      DevNullAsyncClusterConnection.class, AsyncClusterConnection.class);
  ReplicationSink sink = new ReplicationSink(conf, STOPPABLE);
  // Create some dumb walentries.
  List<AdminProtos.WALEntry> entries = new ArrayList<>();
  AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
  // Need a tablename.
  ByteString tableName =
    ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
  // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos
  // describing the edit with all Cells from all edits aggregated in a single CellScanner.
  final List<Cell> cells = new ArrayList<>();
  int count = BOUNDARY * 2;
  for (int i = 0; i < count; i++) {
    byte[] bytes = Bytes.toBytes(i);
    // Create a wal entry. Everything is set to the current index as bytes or int/long.
    entryBuilder.clear();
    entryBuilder.setKey(entryBuilder.getKeyBuilder().setLogSequenceNumber(i)
      .setEncodedRegionName(ByteString.copyFrom(bytes)).setWriteTime(i).setTableName(tableName)
      .build());
    // Lets have one Cell associated with each WALEdit.
    entryBuilder.setAssociatedCellCount(1);
    entries.add(entryBuilder.build());
    // We need to add a Cell per WALEdit to the cells array.
    CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
    // Make cells whose row, family, cell, value, and ts are == 'i'.
    Cell cell = cellBuilder.setRow(bytes).setFamily(bytes).setQualifier(bytes)
      .setType(Cell.Type.Put).setTimestamp(i).setValue(bytes).build();
    cells.add(cell);
  }
  // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has
  // all Cells from all the WALEntries made above.
  CellScanner cellScanner = new CellScanner() {
    // Set to -1 because advance gets called before current.
    int index = -1;

    @Override
    public Cell current() {
      return cells.get(index);
    }

    @Override
    public boolean advance() throws IOException {
      index++;
      return index < cells.size();
    }
  };
  // Call our sink.
  sink.replicateEntries(entries, cellScanner, null, null, null);
  // Check what made it through and what was filtered.
  assertTrue(FILTERED.get() > 0);
  assertTrue(UNFILTERED.get() > 0);
  assertEquals(count, FILTERED.get() + UNFILTERED.get());
}
 
Example 18
Source File: TestFailForUnsupportedHBaseVersions.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires running it against a version of HBase that
 * is not supported with WAL compression. Currently, this is the default version (0.94.4), so
 * just running 'mvn test' will run the full test. However, this test will not fail when running
 * against a version of HBase with WAL compression enabled. Therefore, to fully test this
 * functionality, we need to run the test against both a supported and an unsupported version of
 * HBase (as long as we want to support a version of HBase that doesn't support custom WAL
 * codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IndexTestingUtils.setupConfig(conf);
  // enable WAL Compression
  conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

  // check the version to see if it isn't supported
  String version = VersionInfo.getVersion();
  boolean supported = false;
  if (Indexer.validateVersion(version, conf) == null) {
    supported = true;
  }

  // start the minicluster
  HBaseTestingUtility util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  // setup the primary table
  HTableDescriptor desc = new HTableDescriptor(
      "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
  byte[] family = Bytes.toBytes("f");
  desc.addFamily(new HColumnDescriptor(family));

  // enable indexing to a non-existent index table
  String indexTableName = "INDEX_TABLE";
  ColumnGroup fam1 = new ColumnGroup(indexTableName);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(desc);

  // get a reference to the regionserver, so we can ensure it aborts
  HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

  // create the primary table
  HBaseAdmin admin = util.getHBaseAdmin();
  if (supported) {
    admin.createTable(desc);
    assertFalse("Hosting regeion server failed, even the HBase version (" + version
        + ") supports WAL Compression.", server.isAborted());
  } else {
    admin.createTableAsync(desc, null);

    // wait for the regionserver to abort - if this doesn't occur in the timeout, assume its
    // broken.
    while (!server.isAborted()) {
      LOG.debug("Waiting on regionserver to abort..");
    }
  }

  // cleanup
  util.shutdownMiniCluster();
}
 
Example 19
Source File: TestDefaultMemStore.java    From hbase with Apache License 2.0
/** Test getNextRow from memstore
 * @throws InterruptedException
 */
@Test
public void testGetNextRow() throws Exception {
  addRows(this.memstore);
  // Add more versions to make it a little more interesting.
  Thread.sleep(1);
  addRows(this.memstore);
  Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY);
  assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty,
      new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
  for (int i = 0; i < ROW_COUNT; i++) {
    Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
        System.currentTimeMillis()));
    if (i + 1 == ROW_COUNT) {
      assertNull(nr);
    } else {
      assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr,
          new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
    }
  }
  //starting from each row, validate results should contain the starting row
  Configuration conf = HBaseConfiguration.create();
  for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
    ScanInfo scanInfo =
        new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, KeepDeletedCells.FALSE,
            HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false);
    try (InternalScanner scanner =
        new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null,
            memstore.getScanners(0))) {
      List<Cell> results = new ArrayList<>();
      for (int i = 0; scanner.next(results); i++) {
        int rowId = startRowId + i;
        Cell left = results.get(0);
        byte[] row1 = Bytes.toBytes(rowId);
        assertTrue("Row name",
          CellComparatorImpl.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0);
        assertEquals("Count of columns", QUALIFIER_COUNT, results.size());
        List<Cell> row = new ArrayList<>();
        for (Cell kv : results) {
          row.add(kv);
        }
        isExpectedRowWithoutTimestamps(rowId, row);
        // Clear out set. Otherwise row results accumulate.
        results.clear();
      }
    }
  }
}
 
Example 20
Source File: HbaseConfigurationFactoryBean.java    From pinpoint with Apache License 2.0
public void afterPropertiesSet() {
    configuration = (hadoopConfig != null ? HBaseConfiguration.create(hadoopConfig) : HBaseConfiguration.create());
    addProperties(configuration, properties);
}
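Example 20 shows the second overload: HBaseConfiguration.create(Configuration) starts from the HBase defaults and then merges the supplied Hadoop Configuration over them, so settings carried in hadoopConfig take precedence over the bundled XML resources. The short fragment below contrasts the two overloads; the quorum value is a placeholder, and the imports match the sketch near the top of the page.

// Standalone: only hbase-default.xml / hbase-site.xml from the classpath.
Configuration hbaseOnly = HBaseConfiguration.create();

// Layered: merge an existing Hadoop Configuration over the HBase defaults,
// e.g. one carrying site-specific or per-job overrides.
Configuration hadoopConf = new Configuration();
hadoopConf.set("hbase.zookeeper.quorum", "zk-host"); // placeholder override
Configuration merged = HBaseConfiguration.create(hadoopConf);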