Java Code Examples for org.apache.hadoop.hbase.HBaseConfiguration

The following are top-voted examples showing how to use org.apache.hadoop.hbase.HBaseConfiguration. The examples are extracted from open source projects.
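Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: HBaseConfiguration.create() loads hbase-default.xml and hbase-site.xml from the classpath, and the resulting Configuration is handed to ConnectionFactory. The quorum host and table name below are placeholders, not values from any of the projects that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class HBaseConfigurationSketch {
  public static void main(String[] args) throws Exception {
    // Loads hbase-default.xml and hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Programmatic overrides win over the XML files; "zk1" is a placeholder host.
    conf.set("hbase.zookeeper.quorum", "zk1");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      System.out.println("Connected; table = " + table.getName());
    }
  }
}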
Example 1
Project: Oozie_MajorCompaction_Example   File: MajorCompaction.java   (8 votes)
public static void main(String[] argc) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));

  if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
    conf.set("mapreduce.job.credentials.binary",
             System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
  }

  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();

  System.out.println("Compacting table " + argc[0]);
  TableName tableName = TableName.valueOf(argc[0]);
  admin.majorCompact(tableName);
  while ("MAJOR".equals(admin.getCompactionState(tableName).toString())) {
    TimeUnit.SECONDS.sleep(10);
    System.out.println("Compacting table " + argc[0]);
  }
  System.out.println("Done compacting table " + argc[0]);
}
 
Example 2
Project: SparkIsax   File: HBaseUtils.java   (6 votes)
/**
 * Connect to HBase.
 */
public static void open() {
	try {
		config = HBaseConfiguration.create();
		conn = HConnectionManager.createConnection(config);
		admin = new HBaseAdmin(conn);
		hbase_table = conn.getTable(ISAXIndex.TABLE_NAME);
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Example 3
Project: QDrill   File: HBasePStoreProvider.java   (6 votes)
public HBasePStoreProvider(PStoreRegistry registry) {
  @SuppressWarnings("unchecked")
  Map<String, Object> config = (Map<String, Object>) registry.getConfig().getAnyRef(DrillHBaseConstants.SYS_STORE_PROVIDER_HBASE_CONFIG);
  this.hbaseConf = HBaseConfiguration.create();
  this.hbaseConf.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "drill-hbase-persistent-store-client");
  if (config != null) {
    for (Map.Entry<String, Object> entry : config.entrySet()) {
      this.hbaseConf.set(entry.getKey(), String.valueOf(entry.getValue()));
    }
  }
  this.storeTableName = registry.getConfig().getString(DrillHBaseConstants.SYS_STORE_PROVIDER_HBASE_TABLE);

  ClusterCoordinator coord = registry.getClusterCoordinator();
  if ((coord instanceof ZKClusterCoordinator)) {
    this.localEStoreProvider = null;
    this.zkEStoreProvider = new ZkEStoreProvider(((ZKClusterCoordinator)registry.getClusterCoordinator()).getCurator());
    this.zkAvailable = true;
  } else {
    this.localEStoreProvider = new LocalEStoreProvider();
    this.zkEStoreProvider = null;
    this.zkAvailable = false;
  }

}
 
Example 4
Project: ditb   File: HQuorumPeer.java   (6 votes)
/**
 * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
 * @param args String[] of command line arguments. Not used.
 */
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();
  try {
    Properties zkProperties = ZKConfig.makeZKProps(conf);
    writeMyID(zkProperties);
    QuorumPeerConfig zkConfig = new QuorumPeerConfig();
    zkConfig.parseProperties(zkProperties);

    // login the zookeeper server principal (if using security)
    ZKUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE,
      HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
      zkConfig.getClientPortAddress().getHostName());

    runZKServer(zkConfig);
  } catch (Exception e) {
    e.printStackTrace();
    System.exit(-1);
  }
}
 
Example 5
Project: ditb   File: TestRegionObserverScannerOpenHook.java   (6 votes)
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
        + r, r.listCells());
}
 
Example 6
Project: aliyun-tablestore-hbase-client   File: TestScanRow.java   (6 votes)
public TestScanRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 7
Project: aliyun-tablestore-hbase-client   File: TestDeleteRow.java   (6 votes)
public TestDeleteRow() throws IOException,InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    family = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 8
Project: aliyun-tablestore-hbase-client   File: TestBatchRow.java   (6 votes)
public TestBatchRow() throws IOException,InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    family = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 9
Project: ditb   File: BaseRunner.java   (6 votes)
public void init(String additionalConf) throws IOException {
  configuration = HBaseConfiguration.create();
  HRegionServer.loadWinterConf(configuration, additionalConf);
  conn = ConnectionFactory.createConnection(configuration);
  IndexTableRelation relation;
  if (IndexType.isUserDefinedIndex(indexType))
    relation = getUserDefinedIndexTableRelation(tableName, indexType);
  else relation = getRegularIndexTableRelation(tableName, indexType);
  admin = new IndexTableAdmin(configuration, conn, relation);
  if (indexType == IndexType.LCIndex) admin.setLCIndexRange(getLCIndexRangeStr());
  //    admin.createTable(false, false);

  byte[][] splits = new byte[10][];
  for (int i = 0; i < 10; i++) {
    splits[i] = Bytes.toBytes(i * 1000);
  }
  admin.createTable(true, true, splits);
}
 
Example 10
Project: ditb   File: InfoServer.java   (6 votes)
/**
 * Create a status server on the given port.
 * The jsp scripts are taken from src/hbase-webapps/<code>name</code>.
 * @param name The name of the server
 * @param bindAddress address to bind to
 * @param port The port to use on the server
 * @param findPort whether the server should start at the given port and
 * increment by 1 until it finds a free port
 * @param c the Configuration to build the server from
 * @throws IOException if the server could not be created
 */
public InfoServer(String name, String bindAddress, int port, boolean findPort,
    final Configuration c)
throws IOException {
  HttpConfig httpConfig = new HttpConfig(c);
  HttpServer.Builder builder =
    new org.apache.hadoop.hbase.http.HttpServer.Builder();

  builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() +
    bindAddress + ":" +
    port)).setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c);
  String logDir = System.getProperty("hbase.log.dir");
  if (logDir != null) {
    builder.setLogDir(logDir);
  }
  if (httpConfig.isSecure()) {
    builder.keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null))
      .keyStore(c.get("ssl.server.keystore.location"),
        HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null),
        c.get("ssl.server.keystore.type", "jks"))
      .trustStore(c.get("ssl.server.truststore.location"),
        HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null),
        c.get("ssl.server.truststore.type", "jks"));
  }
  this.httpServer = builder.build();
}
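The keystore lookups above go through HBaseConfiguration.getPassword, which consults any Hadoop credential provider configured via hadoop.security.credential.provider.path before falling back to the plain configuration value and then the supplied default. A hedged one-liner illustrating the fallback (the default password here is a placeholder):

// Resolves the password from a credential provider if one is configured,
// else from the config key itself, else returns the given default.
String keyPass = HBaseConfiguration.getPassword(
    c, "ssl.server.keystore.keypassword", "changeit" /* placeholder default */);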
 
Example 11
Project: ditb   File: TableMapReduceUtil.java   (6 votes)
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job configuration to adjust.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReduceJob(String table,
  Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
  boolean addDependencyJars) throws IOException {
  job.setOutputFormat(TableOutputFormat.class);
  job.setReducerClass(reducer);
  job.set(TableOutputFormat.OUTPUT_TABLE, table);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Put.class);
  job.setStrings("io.serializations", job.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions =
      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
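As a usage sketch (not from the project above), a driver might invoke this helper as follows; MyTableReducer and the table name "scores" are hypothetical placeholders:

// Hypothetical driver for the helper above; MyTableReducer extends TableReduce.
JobConf job = new JobConf(HBaseConfiguration.create());
job.setJobName("example-table-reduce");
TableMapReduceUtil.initTableReduceJob(
    "scores",                  // output table (placeholder)
    MyTableReducer.class,      // reducer class (hypothetical)
    job,
    HRegionPartitioner.class,  // or null for the default partitioner
    true);                     // ship dependency jars via tmpjars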
 
Example 12
Project: ditb   File: TestUser.java   (6 votes)
@Test
public void testSecurityForNonSecureHadoop() {
  assertFalse("Security should be disable in non-secure Hadoop",
      User.isSecurityEnabled());

  Configuration conf = HBaseConfiguration.create();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("Security should be enabled", User.isHBaseSecurityEnabled(conf));

  conf = HBaseConfiguration.create();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertFalse("HBase security should not be enabled if " 
      + User.HBASE_SECURITY_CONF_KEY + " is not set accordingly",
      User.isHBaseSecurityEnabled(conf));

  conf = HBaseConfiguration.create();
  conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("HBase security should be enabled regardless of underlying "
      + "HDFS settings", User.isHBaseSecurityEnabled(conf));
}
 
Example 13
Project: ditb   File: TestWALObserver.java   (6 votes)
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
  this.oldLogDir = new Path(this.hbaseRootDir,
      HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(this.hbaseRootDir,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;

  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Example 14
Project: ditb   File: TestStripeStoreFileManager.java   (6 votes)
/**
 * Verifies scenario for finding a split point.
 * @param splitPointAfter Stripe to expect the split point at/after.
 * @param shouldSplitStripe If true, the split point is expected in the middle of the above
 *                          stripe; if false, should be at the end.
 * @param splitRatioToVerify Maximum split imbalance ratio.
 * @param sizes Stripe sizes.
 */
private void verifySplitPointScenario(int splitPointAfter, boolean shouldSplitStripe,
    float splitRatioToVerify, int... sizes) throws Exception {
  assertTrue(sizes.length > 1);
  ArrayList<StoreFile> sfs = new ArrayList<StoreFile>();
  for (int sizeIx = 0; sizeIx < sizes.length; ++sizeIx) {
    byte[] startKey = (sizeIx == 0) ? OPEN_KEY : Bytes.toBytes(sizeIx - 1);
    byte[] endKey = (sizeIx == sizes.length - 1) ? OPEN_KEY : Bytes.toBytes(sizeIx);
    MockStoreFile sf = createFile(sizes[sizeIx], 0, startKey, endKey);
    sf.splitPoint = Bytes.toBytes(-sizeIx); // set split point to the negative index
    sfs.add(sf);
  }

  Configuration conf = HBaseConfiguration.create();
  if (splitRatioToVerify != 0) {
    conf.setFloat(StripeStoreConfig.MAX_REGION_SPLIT_IMBALANCE_KEY, splitRatioToVerify);
  }
  StripeStoreFileManager manager = createManager(al(), conf);
  manager.addCompactionResults(al(), sfs);
  int result = Bytes.toInt(manager.getSplitPoint());
  // Either end key and thus positive index, or "middle" of the file and thus negative index.
  assertEquals(splitPointAfter * (shouldSplitStripe ? -1 : 1), result);
}
 
Example 15
Project: ditb   File: ReplicationPeersZKImpl.java   (6 votes)
@Override
public Pair<ReplicationPeerConfig, Configuration> getPeerConf(String peerId)
    throws ReplicationException {
  ReplicationPeerConfig peerConfig = getReplicationPeerConfig(peerId);

  if (peerConfig == null) {
    return null;
  }

  Configuration otherConf;
  try {
    otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey());
  } catch (IOException e) {
    LOG.error("Can't get peer configuration for peerId=" + peerId + " because:", e);
    return null;
  }

  if (!peerConfig.getConfiguration().isEmpty()) {
    CompoundConfiguration compound = new CompoundConfiguration();
    compound.add(otherConf);
    compound.addStringMap(peerConfig.getConfiguration());
    return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, compound);
  }

  return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, otherConf);
}
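For context, HBaseConfiguration.createClusterConf takes a base configuration plus a ZooKeeper "cluster key" of the form host1,host2,host3:port:/znode and returns a configuration pointing at that cluster, which is what lets replication reach a peer. A minimal sketch with placeholder hosts:

// "zk1,zk2,zk3:2181:/hbase" is a placeholder cluster key.
Configuration base = HBaseConfiguration.create();
Configuration peer = HBaseConfiguration.createClusterConf(
    base, "zk1,zk2,zk3:2181:/hbase");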
 
Example 16
Project: ditb   File: TestGlobalEventLoopGroup.java   (6 votes)
@Test
public void test() {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, true);
  AsyncRpcClient client = new AsyncRpcClient(conf);
  assertNotNull(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP);
  AsyncRpcClient client1 = new AsyncRpcClient(conf);
  assertSame(client.bootstrap.group(), client1.bootstrap.group());
  client1.close();
  assertFalse(client.bootstrap.group().isShuttingDown());

  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, false);
  AsyncRpcClient client2 = new AsyncRpcClient(conf);
  assertNotSame(client.bootstrap.group(), client2.bootstrap.group());
  client2.close();

  client.close();
}
 
Example 17
Project: hbase-tutorials   File: HBaseConnectionUtils.java   (5 votes)
private static Configuration getConfiguration() throws IOException {
  Properties props = PropertiesUtils.load("hbase.properties");

  Configuration config = HBaseConfiguration.create();
  config.set("hbase.zookeeper.property.clientPort", props.getProperty("hbase.zookeeper.property.clientPort"));
  config.set("hbase.zookeeper.quorum", props.getProperty("hbase.zookeeper.quorum"));
  return config;
}
 
Example 18
Project: big_data   File: AnalyserLogDataRunner.java   (5 votes)
@Override
public void setConf(Configuration conf) {
	conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
	conf.set("yarn.resourcemanager.hostname", "hadoop1");
	conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
	this.conf = HBaseConfiguration.create(conf);
}
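Note the call to HBaseConfiguration.create(conf): unlike the no-argument create(), it loads the HBase resource files into a fresh configuration and then merges the passed-in settings over them, so the fs.defaultFS, YARN, and quorum values set above survive. A compact sketch with a placeholder host:

Configuration hadoopConf = new Configuration();
hadoopConf.set("fs.defaultFS", "hdfs://namenode:8020"); // placeholder host
// Entries from hadoopConf are merged over the loaded HBase defaults.
Configuration merged = HBaseConfiguration.create(hadoopConf);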
 
Example 19
Project: ditb   File: TestStripeStoreEngine.java   (5 votes)
@Test
public void testCreateBasedOnConfig() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName());
  StripeStoreEngine se = createEngine(conf);
  assertTrue(se.getCompactionPolicy() instanceof StripeCompactionPolicy);
}
 
Example 20
Project: ditb   File: TestKeyProvider.java   (5 votes)
@Test
public void testTestProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  KeyProvider provider = Encryption.getKeyProvider(conf);
  assertNotNull("Null returned for provider", provider);
  assertTrue("Provider is not the expected type", provider instanceof KeyProviderForTesting);

  Key key = provider.getKey("foo");
  assertNotNull("Test provider did not return a key as expected", key);
  assertEquals("Test provider did not create a key for AES", key.getAlgorithm(), "AES");
  assertEquals("Test provider did not create a key of adequate length",
    key.getEncoded().length, AES.KEY_LENGTH);
}
 
Example 21
Project: ditb   File: IndexBuilder.java   (5 votes)
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if(otherArgs.length < 3) {
    System.err.println("Only " + otherArgs.length + " arguments supplied, required: 3");
    System.err.println("Usage: IndexBuilder <TABLE_NAME> <COLUMN_FAMILY> <ATTR> [<ATTR> ...]");
    System.exit(-1);
  }
  Job job = configureJob(conf, otherArgs);
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Example 22
Project: ditb   File: TestStripeCompactionPolicy.java   (5 votes)
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify the deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But cannot be dropped if there are.
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // Unless there are enough to cause L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(
      policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
      new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // even if L0 has no file
  // if all files of stripe aren't selected, delete must not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<StoreFile> compact_file = new ArrayList<StoreFile>();
  Iterator<StoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
      compact_file.add(iter.next());
  }
  verifyCompaction(policy, si, compact_file, false, 1, null, si.getStartRow(0), si.getEndRow(0), true);
}
 
Example 23
Project: uavstack   File: HBaseDataSource.java   (5 votes)
@Override
protected Connection initSourceConnect() throws IOException, ServiceException {

    // For now, only the ZooKeeper list is converted into the server list and the database name
    Configuration config = HBaseConfiguration.create();
    String address = connection.toString(",");
    config.set(DataStoreProtocol.HBASE_ZK_QUORUM, address);
    config.set("hbase.client.scanner.caching",
            (String) connection.getContext(DataStoreProtocol.HBASE_QUERY_CACHING));
    config.set("hbase.client.scanner.max.result.size",
            (String) connection.getContext(DataStoreProtocol.HBASE_QUERY_MAXRESULTSIZE));
    config.set("zookeeper.recovery.retry", String.valueOf(connection.getRetryTimes()));

    // Configuration guarding against "Failed to replace a bad datanode" exceptions
    config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    config.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");

    HBaseAdmin.checkHBaseAvailable(config);
    conn = ConnectionFactory.createConnection(config);
    // hbase.client.retries.number = 1 and zookeeper.recovery.retry = 1.
    return conn;
}
 
Example 24
Project: ditb   File: TestDefaultStoreEngine.java   (5 votes)
@Test
public void testCustomParts() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName());
  conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      DummyCompactionPolicy.class.getName());
  conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
      DummyStoreFlusher.class.getName());
  Store mockStore = Mockito.mock(Store.class);
  StoreEngine<?, ?, ?, ?> se = StoreEngine.create(mockStore, conf, new KVComparator());
  Assert.assertTrue(se instanceof DefaultStoreEngine);
  Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy);
  Assert.assertTrue(se.getStoreFlusher() instanceof DummyStoreFlusher);
  Assert.assertTrue(se.getCompactor() instanceof DummyCompactor);
}
 
Example 25
Project: ditb   File: TestUser.java   (5 votes)
@Test
public void testCacheGetGroups() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  UserProvider up = UserProvider.instantiate(conf);

  // VERY unlikely that this user will exist on the box.
  // This should mean the user has no groups.
  String nonUser = "kklvfnvhdhcenfnniilggljhdecjhidkle";

  // Create two UGI's for this username
  UserGroupInformation ugiOne = UserGroupInformation.createRemoteUser(nonUser);
  UserGroupInformation ugiTwo = UserGroupInformation.createRemoteUser(nonUser);

  // Now try and get the user twice.
  User uOne = up.create(ugiOne);
  User uTwo = up.create(ugiTwo);

  // Make sure that we didn't break groups and everything worked well.
  assertArrayEquals(uOne.getGroupNames(),uTwo.getGroupNames());

  // Check that they are referentially equal.
  // Since getting a group for a user that doesn't exist creates a new string array
  // the only way that they should be referentially equal is if the cache worked and
  // made sure we didn't go to hadoop's script twice.
  assertTrue(uOne.getGroupNames() == uTwo.getGroupNames());
  assertEquals(0, ugiOne.getGroupNames().length);
}
 
Example 26
Project: ditb   File: TestRegionObserverBypass.java   (5 votes)
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Example 27
Project: dremio-oss   File: HBaseStoragePluginConfig.java   (5 votes)
@JsonIgnore
public Configuration getHBaseConf() {
  if (hbaseConf == null) {
    hbaseConf = HBaseConfiguration.create();
    if (config != null) {
      for (Map.Entry<String, String> entry : config.entrySet()) {
        hbaseConf.set(entry.getKey(), entry.getValue());
      }
    }
  }
  return hbaseConf;
}
 
Example 28
Project: ditb   File: TestStripeCompactionPolicy.java   (5 votes)
@Test
public void testOldStripesFromFlush() throws Exception {
  StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
  StripeInformationProvider si = createStripes(0, KEY_C, KEY_D);

  KeyValue[] input = new KeyValue[] { KV_B, KV_C, KV_C, KV_D, KV_E };
  KeyValue[][] expected = new KeyValue[][] { new KeyValue[] { KV_B },
      new KeyValue[] { KV_C, KV_C }, new KeyValue[] {  KV_D, KV_E } };
  verifyFlush(policy, si, input, expected, new byte[][] { OPEN_KEY, KEY_C, KEY_D, OPEN_KEY });
}
 
Example 29
Project: ditb   File: TestDefaultMemStore.java   (5 votes)
/**
 * Test a pathological pattern that shows why we can't currently
 * use the MSLAB for upsert workloads. This test inserts data
 * in the following pattern:
 *
 * - row0001 through row1000 (fills up one 2M Chunk)
 * - row0002 through row1001 (fills up another 2M chunk, leaves one reference
 *   to the first chunk)
 * - row0003 through row1002 (another chunk, another dangling reference)
 *
 * This causes OOME pretty quickly if we use MSLAB for upsert
 * since each 2M chunk is held onto by a single reference.
 */
public void testUpsertMSLAB() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(DefaultMemStore.USEMSLAB_KEY, true);
  memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR);

  int ROW_SIZE = 2048;
  byte[] qualifier = new byte[ROW_SIZE - 4];

  MemoryMXBean bean = ManagementFactory.getMemoryMXBean();
  for (int i = 0; i < 3; i++) { System.gc(); }
  long usageBefore = bean.getHeapMemoryUsage().getUsed();

  long size = 0;
  long ts=0;

  for (int newValue = 0; newValue < 1000; newValue++) {
    for (int row = newValue; row < newValue + 1000; row++) {
      byte[] rowBytes = Bytes.toBytes(row);
      size += memstore.updateColumnValue(rowBytes, FAMILY, qualifier, newValue, ++ts);
    }
  }
  System.out.println("Wrote " + ts + " vals");
  for (int i = 0; i < 3; i++) { System.gc(); }
  long usageAfter = bean.getHeapMemoryUsage().getUsed();
  System.out.println("Memory used: " + (usageAfter - usageBefore)
      + " (heapsize: " + memstore.heapSize() +
      " size: " + size + ")");
}
 
Example 30
Project: ditb   File: TestHeapMemoryManager.java   (5 votes)
@Test
public void testWhenClusterIsWriteHeavyWithEmptyMemstore() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub();
  // Empty block cache and memstore
  blockCache.setTestBlockSize(0);
  regionServerAccounting.setTestMemstoreSize(0);
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), regionServerAccounting);
  long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
  long oldBlockCacheSize = blockCache.maxSize;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  memStoreFlusher.flushType = FlushType.ABOVE_HIGHER_MARK;
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.flushType = FlushType.ABOVE_LOWER_MARK;
  memStoreFlusher.requestFlush(null, false);
  // Allow the tuner to run once and do any necessary memory tuning
  Thread.sleep(1500);
  // No changes should be made by the tuner as we already have a lot of empty space
  assertEquals(oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
  assertEquals(oldBlockCacheSize, blockCache.maxSize);
}
 
Example 31
Project: ditb   File: TestFSUtils.java   (5 votes)
@Test
public void testPermMask() throws Exception {

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  // default fs permission
  FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // 'hbase.data.umask.enable' is false. We will get default fs permission.
  assertEquals(FsPermission.getFileDefault(), defaultFsPerm);

  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  // first check that we don't crash if we don't have perms set
  FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // default 'hbase.data.umask' is 000, and this umask will be used when
  // 'hbase.data.umask.enable' is true.
  // Therefore we will not get the real fs default in this case.
  // Instead we will get the starting point FULL_RWX_PERMISSIONS
  assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

  conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
  // now check that we get the right perms
  FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission("700"), filePerm);

  // then that the correct file is created
  Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
    out.close();
    FileStatus stat = fs.getFileStatus(p);
    assertEquals(new FsPermission("700"), stat.getPermission());
    // and then cleanup
  } finally {
    fs.delete(p, true);
  }
}
 
Example 32
Project: ditb   File: FSHLog.java   View source code 5 votes vote down vote up
/**
 * Pass one or more log file names and it will either dump out a text version
 * on <code>stdout</code> or split the specified log files.
 *
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 2) {
    usage();
    System.exit(-1);
  }
  // either dump using the WALPrettyPrinter or split, depending on args
  if (args[0].compareTo("--dump") == 0) {
    WALPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
  } else if (args[0].compareTo("--perf") == 0) {
    LOG.fatal("Please use the WALPerformanceEvaluation tool instead. i.e.:");
    LOG.fatal("\thbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation --iterations " +
        args[1]);
    System.exit(-1);
  } else if (args[0].compareTo("--split") == 0) {
    Configuration conf = HBaseConfiguration.create();
    for (int i = 1; i < args.length; i++) {
      try {
        Path logPath = new Path(args[i]);
        FSUtils.setFsDefault(conf, logPath);
        split(conf, logPath);
      } catch (IOException t) {
        t.printStackTrace(System.err);
        System.exit(-1);
      }
    }
  } else {
    usage();
    System.exit(-1);
  }
}
 
Example 33
Project: ditb   File: TestStore.java   (5 votes)
/**
 * Test for hbase-1686.
 * @throws IOException
 */
@Test
public void testEmptyStoreFile() throws IOException {
  init(this.name.getMethodName());
  // Write a store file.
  this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
  this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
  flush(1);
  // Now put in place an empty store file. It's a little tricky. Have to
  // do it manually with a hacked-in sequence id.
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = f.getMaxSequenceId();
  Configuration c = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(c);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
      fs)
          .withOutputDir(storedir)
          .withFileContext(meta)
          .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  this.store.close();
  // Reopen it... should pick up two files
  this.store = new HStore(this.store.getHRegion(), this.store.getFamily(), c);
  Assert.assertEquals(2, this.store.getStorefilesCount());

  result = HBaseTestingUtility.getFromStoreFile(store,
      get.getRow(),
      qualifiers);
  Assert.assertEquals(1, result.size());
}
 
Example 34
Project: aliyun-tablestore-hbase-client   File: TestCheckAndDeleteRow.java   (5 votes)
public TestCheckAndDeleteRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 35
Project: aliyun-tablestore-hbase-client   File: TestCheckAndPutRow.java   (5 votes)
public TestCheckAndPutRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 36
Project: aliyun-tablestore-hbase-client   File: TestPutRow.java   (5 votes)
public TestPutRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 37
Project: ditb   File: TestStore.java   (5 votes)
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
  final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
  long anyValue = 10;

  // We'll check that it uses correct config and propagates it appropriately by going thru
  // the simplest "real" path I can find - "throttleCompaction", which just checks whether
  // a number we pass in is higher than some config value, inside compactionPolicy.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(CONFIG_KEY, anyValue);
  init(name.getMethodName() + "-xml", conf);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HTD overrides XML.
  --anyValue;
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-htd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HCD overrides them both.
  --anyValue;
  hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-hcd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));
}
 
Example 38
Project: aliyun-tablestore-hbase-client   File: TestBufferedMutator.java   (5 votes)
public TestBufferedMutator() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    mutator = connection.getBufferedMutator(tableName);
    table = connection.getTable(tableName);
}
 
Example 39
Project: ditb   File: TestStripeCompactionPolicy.java   (5 votes)
@Test
public void testWithParallelCompaction() throws Exception {
  // TODO: currently only one compaction at a time per store is allowed. If this changes,
  //       the appropriate file exclusion testing would need to be done in respective tests.
  assertNull(createPolicy(HBaseConfiguration.create()).selectCompaction(
      mock(StripeInformationProvider.class), al(createFile()), false));
}
 
Example 40
Project: SkyEye   File: HbaseAutoConfiguration.java   (5 votes)
@Bean
@ConditionalOnMissingBean(HbaseTemplate.class)
public HbaseTemplate hbaseTemplate() {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_QUORUM, this.hbaseProperties.getQuorum());
    configuration.set(HBASE_ROOTDIR, hbaseProperties.getRootDir());
    configuration.set(HBASE_ZNODE_PARENT, hbaseProperties.getNodeParent());
    return new HbaseTemplate(configuration);
}