Java Code Examples for org.apache.hadoop.hbase.HBaseConfiguration

The following examples show how to use org.apache.hadoop.hbase.HBaseConfiguration. These examples are extracted from open source projects.
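Most of the examples below follow the same basic pattern: build a Configuration with one of HBaseConfiguration's static factory methods, apply any programmatic overrides, and hand the result to ConnectionFactory. As a minimal, self-contained sketch of that pattern (the quorum host, client port, and table name are placeholder values, not taken from any of the listed projects):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class HBaseConfigurationSketch {
  public static void main(String[] args) throws Exception {
    // create() returns a fresh Configuration with hbase-default.xml and
    // hbase-site.xml (when present on the classpath) already applied.
    Configuration conf = HBaseConfiguration.create();

    // Programmatic overrides; "localhost" and "2181" are placeholders.
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "2181");

    // Other variants that appear in the examples below:
    //   HBaseConfiguration.create(other)           - HBase resources plus the entries of 'other'
    //   HBaseConfiguration.addHbaseResources(conf) - adds the HBase resources to 'conf' in place
    //   HBaseConfiguration.merge(dest, src)        - copies every entry of 'src' into 'dest'

    // A Connection is heavyweight and thread-safe; create it once and close it when done.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
      System.out.println("Connected to table " + table.getName());
    }
  }
}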
Example 1
Project: hudi   File: HBaseIndex.java    License: Apache License 2.0
private Connection getHBaseConnection() {
  Configuration hbaseConfig = HBaseConfiguration.create();
  String quorum = config.getHbaseZkQuorum();
  hbaseConfig.set("hbase.zookeeper.quorum", quorum);
  String zkZnodeParent = config.getHBaseZkZnodeParent();
  if (zkZnodeParent != null) {
    hbaseConfig.set("zookeeper.znode.parent", zkZnodeParent);
  }
  String port = String.valueOf(config.getHbaseZkPort());
  hbaseConfig.set("hbase.zookeeper.property.clientPort", port);
  try {
    return ConnectionFactory.createConnection(hbaseConfig);
  } catch (IOException e) {
    throw new HoodieDependentSystemUnavailableException(HoodieDependentSystemUnavailableException.HBASE,
        quorum + ":" + port);
  }
}
 
Example 2
Project: hbase   File: TestTableInputFormat.java    License: Apache License 2.0
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
    Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter =
    new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
 
Example 3
Project: phoenix   File: IndexUpgradeTool.java    License: Apache License 2.0
@VisibleForTesting
public int executeTool() {
    Configuration conf = HBaseConfiguration.addHbaseResources(getConf());

    try (Connection conn = getConnection(conf)) {
        ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class)
                .getQueryServices();

        boolean status = extractTablesAndIndexes(conn.unwrap(PhoenixConnection.class));

        if (status) {
            return executeTool(conn, queryServices, conf);
        }
    } catch (SQLException e) {
        LOGGER.severe("Something went wrong in executing tool " + e);
    }
    return -1;
}
 
Example 4
Project: phoenix   File: IndexScrutinyTool.java    License: Apache License 2.0
private Job configureSubmittableJob(Job job, Path outputPath, Class<IndexScrutinyMapperForTest> mapperClass) throws Exception {
    Configuration conf = job.getConfiguration();
    conf.setBoolean("mapreduce.job.user.classpath.first", true);
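    // create(conf) layers the HBase resource files on top of conf; merge() then copies the result back into the job configuration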
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    job.setJarByClass(IndexScrutinyTool.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    if (outputInvalidRows && OutputFormat.FILE.equals(outputFormat)) {
        job.setOutputFormatClass(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(job, outputPath);
    }
    job.setMapperClass((mapperClass == null ? IndexScrutinyMapper.class : mapperClass));
    job.setNumReduceTasks(0);
    // Set the Output classes
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    TableMapReduceUtil.addDependencyJars(job);
    return job;
}
 
Example 5
Project: ranger   File: HBaseRangerAuthorizationTest.java    License: Apache License 2.0
@Test
public void testReadRowFromColFam2AsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "public";

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("temp"));
            
            // Read a row
            Get get = new Get(Bytes.toBytes("row1"));
            Result result = table.get(get);
            byte[] valResult = result.getValue(Bytes.toBytes("colfam2"), Bytes.toBytes("col1"));
            Assert.assertNull(valResult);

            conn.close();
            return null;
        }
    });
}
 
Example 6
Project: Kylin   File: TestHbaseClient.java    License: Apache License 2.0
public static void main(String[] args) throws IOException {
    foo(6, 5);
    foo(5, 2);
    foo(3, 0);

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "hbase_host");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    HTable table = new HTable(conf, "test1");
    Put put = new Put(Bytes.toBytes("row1"));

    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));

    table.put(put);
    table.close();
}
 
Example 7
Project: hbase   File: ReplicationSink.java    License: Apache License 2.0
/**
 * Create a sink for replication
 * @param conf conf object
 * @param stopper boolean to tell this thread to stop
 * @throws IOException thrown when HDFS goes bad or bad file name
 */
public ReplicationSink(Configuration conf, Stoppable stopper)
    throws IOException {
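  // copy the caller's conf (create(conf) returns a new Configuration), so decorateConf() can mutate it safely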
  this.conf = HBaseConfiguration.create(conf);
  decorateConf();
  this.metrics = new MetricsSink();
  this.walEntrySinkFilter = setupWALEntrySinkFilter();
  String className = conf.get("hbase.replication.source.fs.conf.provider",
    DefaultSourceFSConfigurationProvider.class.getCanonicalName());
  try {
    Class<? extends SourceFSConfigurationProvider> c =
        Class.forName(className).asSubclass(SourceFSConfigurationProvider.class);
    this.provider = c.getDeclaredConstructor().newInstance();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "Configured source fs configuration provider class " + className + " throws error.", e);
  }
}
 
Example 8
@BeforeClass
public static void beforeAllTests() throws Exception {
  groups = new String[] { RSGroupInfo.DEFAULT_GROUP };
  servers = generateServers(3);
  groupMap = constructGroupInfo(servers, groups);
  tableDescs = constructTableDesc(false);
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.regions.slop", "0");
  conf.setFloat("hbase.master.balancer.stochastic.readRequestCost", 10000f);
  conf.set("hbase.rsgroup.grouploadbalancer.class",
      StochasticLoadBalancer.class.getCanonicalName());
  loadBalancer = new RSGroupBasedLoadBalancer();
  loadBalancer.setRsGroupInfoManager(getMockedGroupInfoManager());
  loadBalancer.setMasterServices(getMockedMaster());
  loadBalancer.setConf(conf);
  loadBalancer.initialize();
}
 
Example 9
Project: hbase   File: TestThriftConnection.java    License: Apache License 2.0
private static ThriftServer startThriftServer(int port, boolean useHttp) {
  Configuration thriftServerConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  thriftServerConf.setInt(Constants.PORT_CONF_KEY, port);
  if (useHttp) {
    thriftServerConf.setBoolean(Constants.USE_HTTP_CONF_KEY, true);
  }
  ThriftServer server = new ThriftServer(thriftServerConf);
  Thread thriftServerThread = new Thread(() -> {
    try {
      server.run();
    } catch (Exception t) {
      LOG.error("Thrift Server failed", t);
    }
  });
  thriftServerThread.setDaemon(true);
  thriftServerThread.start();
  if (useHttp) {
    TEST_UTIL.waitFor(10000, () -> server.getHttpServer() != null);
  } else {
    TEST_UTIL.waitFor(10000, () -> server.getTserver() != null);
  }
  return server;
}
 
Example 10
Project: metron   File: HBaseDao.java    License: Apache License 2.0
@Override
public synchronized void init(AccessConfig config) {
  if(this.tableInterface == null) {
    this.config = config;
    Map<String, Object> globalConfig = config.getGlobalConfigSupplier().get();
    if(globalConfig == null) {
      throw new IllegalStateException("Cannot find the global config.");
    }
    String table = (String)globalConfig.get(HBASE_TABLE);
    String cf = (String) config.getGlobalConfigSupplier().get().get(HBASE_CF);
    if(table == null || cf == null) {
      throw new IllegalStateException("You must configure " + HBASE_TABLE + " and " + HBASE_CF + " in the global config.");
    }
    try {
      tableInterface = config.getTableProvider().getTable(HBaseConfiguration.create(), table);
      this.cf = cf.getBytes(StandardCharsets.UTF_8);
    } catch (IOException e) {
      throw new IllegalStateException("Unable to initialize HBaseDao: " + e.getMessage(), e);
    }
  }
}
 
Example 11
Project: hbase   File: TestThriftConnection.java    License: Apache License 2.0
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
 
Example 12
Project: flink-learning   File: Main.java    License: Apache License 2.0
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
    configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
    configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
    configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

    Connection connect = ConnectionFactory.createConnection(configuration);
    Admin admin = connect.getAdmin();
    if (!admin.tableExists(HBASE_TABLE_NAME)) { // create the table if it does not already exist
        admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
    }
    Table table = connect.getTable(HBASE_TABLE_NAME);
    TimeStamp ts = new TimeStamp(new Date());
    Date date = ts.getDate();
    Put put = new Put(Bytes.toBytes(date.getTime()));
    put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
    table.put(put);
    table.close();
    connect.close();
}
 
Example 13
Project: phoenix   File: DropIndexDuringUpsertIT.java    License: Apache License 2.0
@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;

    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    // Must update config before starting server
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example 14
Project: opensoc-streaming   File: HBaseStreamPartitioner.java    License: Apache License 2.0
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  
  System.out.println("preparing HBaseStreamPartitioner for streamId " + stream.get_streamId());
  this.targetTasks = targetTasks;
  this.targetTasksSize = this.targetTasks.size();

  Configuration conf = HBaseConfiguration.create();
  try {
    hTable = new HTable(conf, tableName);
    refreshRegionInfo(tableName);

    System.out.println("regionStartKeyRegionNameMap: " + regionStartKeyRegionNameMap);

  } catch (IOException e) {
    e.printStackTrace();
  }

}
 
Example 15
Project: hbase   File: BackupSystemTable.java    License: Apache License 2.0
/**
 * Get backup system table descriptor
 * @return table's descriptor
 */
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
  TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));

  ColumnFamilyDescriptorBuilder colBuilder =
      ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
  colBuilder.setMaxVersions(1);
  Configuration config = HBaseConfiguration.create();
  int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
    BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
  colBuilder.setTimeToLive(ttl);
  ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
  builder.setColumnFamily(colSessionsDesc);
  colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
  colBuilder.setTimeToLive(ttl);
  builder.setColumnFamily(colBuilder.build());
  return builder.build();
}
 
Example 16
Project: super-cloudops   File: HfileBulkImporter.java    License: Apache License 2.0
/**
 * e.g.</br>
 * 
 * <pre>
 *  yarn jar super-devops-tool-hbase-migrator-master.jar \
 *  com.wl4g.devops.tool.hbase.migrator.HfileBulkImporter \
 *  -z emr-header-1:2181 \
 *  -t safeclound.tb_elec_power \
 *  -p /tmp-devops/safeclound.tb_elec_power
 * </pre>
 * 
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
	HbaseMigrateUtils.showBanner();

	CommandLine line = new Builder().option("z", "zkaddr", null, "Zookeeper address.")
			.option("t", "tabname", null, "Hbase table name.")
			.option("p", "path", null, "Data hdfs path to be import. e.g. hdfs://localhost:9000/bak/safeclound.tb_air")
			.build(args);

	Configuration cfg = HBaseConfiguration.create();
	cfg.set("hbase.zookeeper.quorum", line.getOptionValue("z"));
	Connection conn = ConnectionFactory.createConnection(cfg);
	Admin admin = conn.getAdmin();
	Table table = conn.getTable(TableName.valueOf(line.getOptionValue("t")));
	LoadIncrementalHFiles load = new LoadIncrementalHFiles(cfg);
	load.doBulkLoad(new Path(line.getOptionValue("p")), admin, table,
			conn.getRegionLocator(TableName.valueOf(line.getOptionValue("t"))));
}
 
Example 17
Project: eagle   File: TestHBaseBase.java    License: Apache License 2.0
@BeforeClass
public static void setUpHBase() {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set("zookeeper.znode.parent", getZkZnodeParent());
    configuration.setInt("hbase.master.info.port", -1);//avoid port clobbering
    configuration.setInt("hbase.regionserver.info.port", -1);//avoid port clobbering
    hbase = new HBaseTestingUtility(configuration);
    try {
        hbase.startMiniCluster();
    } catch (Exception e) {
        LOGGER.error("Error to start hbase mini cluster: " + e.getMessage(), e);
        throw new IllegalStateException(e);
    }
    System.setProperty("storage.hbase.autoCreateTable","false");
    System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
    System.setProperty("storage.hbase.zookeeperPropertyClientPort", String.valueOf(hbase.getZkCluster().getClientPort()));
}
 
Example 18
Project: hbase   File: TestExecutorService.java    License: Apache License 2.0
@Test
public void testSnapshotHandlers() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server server = mock(Server.class);
  when(server.getConfiguration()).thenReturn(conf);

  ExecutorService executorService = new ExecutorService("testSnapshotHandlers");
  executorService.startExecutorService(ExecutorType.MASTER_SNAPSHOT_OPERATIONS, 1);

  CountDownLatch latch = new CountDownLatch(1);
  CountDownLatch waitForEventToStart = new CountDownLatch(1);
  executorService.submit(new EventHandler(server, EventType.C_M_SNAPSHOT_TABLE) {
    @Override
    public void process() throws IOException {
      waitForEventToStart.countDown();
      try {
        latch.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  });

  // Wait for the EventHandler to start
  waitForEventToStart.await(10, TimeUnit.SECONDS);
  int activeCount = executorService.getExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
      .getThreadPoolExecutor().getActiveCount();
  Assert.assertEquals(1, activeCount);
  latch.countDown();
  Waiter.waitFor(conf, 3000, () -> {
    int count = executorService.getExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
        .getThreadPoolExecutor().getActiveCount();
    return count == 0;
  });
}
 
Example 19
Project: phoenix   File: UpdateStatisticsToolTest.java    License: Apache License 2.0
@Test
public void testRestoreDirFromConfig() {
    UpdateStatisticsTool tool = new UpdateStatisticsTool();
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(FS_DEFAULT_NAME_KEY, "hdfs://base-dir");
    tool.setConf(configuration);
    tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg"});
    assertEquals("hdfs://base-dir/tmp", tool.getRestoreDir().toString());
}
 
Example 20
Project: attic-apex-malhar   File: HBaseTestHelper.java    License: Apache License 2.0
private static Configuration getConfiguration()
{
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "127.0.0.1");
  conf.set("hbase.zookeeper.property.clientPort", "2181");
  return conf;
}
 
Example 21
Project: hbase   File: ThriftServer.java    License: Apache License 2.0
public static void main(String [] args) throws Exception {
  LOG.info("***** STARTING service '" + ThriftServer.class.getSimpleName() + "' *****");
  VersionInfo.logVersion();
  final Configuration conf = HBaseConfiguration.create();
  // for now, only time we return is on an argument error.
  final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
  LOG.info("***** STOPPING service '" + ThriftServer.class.getSimpleName() + "' *****");
  System.exit(status);
}
 
Example 22
Project: hbase   File: TestSimpleRequestController.java    License: Apache License 2.0
private void testIllegalArgument(String key, long value) {
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(key, value);
  try {
    new SimpleRequestController(conf);
    fail("The " + key + " must be bigger than zero");
  } catch (IllegalArgumentException e) {
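    // expected: the controller must reject a non-positive value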
  }
}
 
Example 23
Project: hbase   File: TestSimpleRpcScheduler.java    License: Apache License 2.0
@Test
public void testScanQueueWithZeroScanRatio() throws Exception {

  Configuration schedConf = HBaseConfiguration.create();
  schedConf.setFloat(RpcExecutor.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 1.0f);
  schedConf.setFloat(RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5f);
  schedConf.setFloat(RWQueueRpcExecutor.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0f);

  PriorityFunction priority = mock(PriorityFunction.class);
  when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS);

  RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 2, 1, 1, priority,
                                                  HConstants.QOS_THRESHOLD);
  assertNotEquals(null, scheduler);
}
 
Example 24
Project: jstorm   File: AbstractHBaseClient.java    License: Apache License 2.0
public Configuration makeConf(Map stormConf) {
    Configuration hbaseConf = HBaseConfiguration.create();
    String hbaseQuorum = (String) stormConf.get(HBASE_QUORUM_CONF_KEY);
    hbaseConf.set(HBASE_QUORUM_KEY, hbaseQuorum);

    String hbasePort = stormConf.get(HBASE_PORT_CONF_KEY) + "";
    hbaseConf.set(HBASE_PORT_KEY, hbasePort);

    String hbaseParent = (String) stormConf.get(HBASE_ZK_PARENT_CONF_KEY);
    hbaseConf.set(HBASE_ZK_PARENT_KEY, hbaseParent);

    return hbaseConf;
}
 
Example 25
Project: phoenix   File: IndexUpgradeToolTest.java    License: Apache License 2.0
@Test
public void testConnectionProperties() throws Exception {
    Configuration conf = HBaseConfiguration.create();

    long indexRebuildQueryTimeoutMs = 2000;
    long indexRebuildRpcTimeoutMs = 3000;
    long indexRebuildClientScannerTimeoutMs = 4000;
    int indexRebuildRpcRetryCount = 10;

    conf.setLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, indexRebuildQueryTimeoutMs);
    conf.setLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, indexRebuildRpcTimeoutMs);
    conf.setLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
            indexRebuildClientScannerTimeoutMs);
    conf.setInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, indexRebuildRpcRetryCount);

    // prepare conf for connectionless query
    setupConfForConnectionlessQuery(conf);

    try (Connection conn = IndexUpgradeTool.getConnection(conf)) {
        // verify connection properties for phoenix, hbase timeouts and retries
        Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB),
                Long.toString(indexRebuildQueryTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY),
                Long.toString(indexRebuildRpcTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD),
                Long.toString(indexRebuildClientScannerTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER),
                Long.toString(indexRebuildRpcRetryCount));
    }
}
 
Example 26
Project: hbase   File: TestHeapMemoryManager.java    License: Apache License 2.0
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub(conf));
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode. Set a bigger memstore size from CustomHeapMemoryTuner
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize);// Memstore
  assertHeapSpace(0.02f, blockCache.maxSize);// BlockCache
  // Now we want to be in read mode. Set a bigger block cache size from CustomHeapMemoryTuner
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize);// BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize);// Memstore
}
 
Example 27
Project: bigdata-tutorial   File: Mapper2HbaseDemo.java    License: Apache License 2.0
@Override
protected void setup(Context context) throws IOException,
		InterruptedException {
	super.setup(context);
	conf = HBaseConfiguration.create(context.getConfiguration());
	conf.set("hbase.zookeeper.quorum", "zk1.hadoop,zk2.hadoop,zk3.hadoop");
	conf.set("hbase.zookeeper.property.clientPort", "2181");

	htable = new HTable(conf, "micmiu");
	htable.setAutoFlush(false);
	htable.setWriteBufferSize(12 * 1024 * 1024);//12M
	wal = true;
}
 
Example 28
Project: hbase   File: TestZKUtilNoServer.java    License: Apache License 2.0
@Test
public void testCreateACLWithSameUser() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  conf.set(Superusers.SUPERUSER_CONF_KEY, "user4,@group1,user5,user6");
  UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser("user4"));
  String node = "/hbase/testCreateACL";
  ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
  List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
  assertEquals(3, aclList.size()); // 3, since the service user is the same as one of the superusers
  assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", ""))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user5"))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user6"))));
}
 
Example 29
Project: phoenix   File: Sandbox.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    System.out.println("Starting Phoenix sandbox");
    Configuration conf = HBaseConfiguration.create();
    BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(ImmutableMap.<String, String>of()));

    final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
    testUtil.startMiniCluster();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                if (testUtil != null) {
                    testUtil.shutdownMiniCluster();
                }
            } catch (Exception e) {
                LOG.error("Exception caught when shutting down mini cluster", e);
            }
        }
    });

    int clientPort = testUtil.getZkCluster().getClientPort();
    System.out.println("\n\n\tPhoenix Sandbox is started\n\n");
    System.out.printf("\tYou can now connect with url 'jdbc:phoenix:localhost:%d'\n" +
                    "\tor connect via sqlline with 'bin/sqlline.py localhost:%d'\n\n",
            clientPort, clientPort);

    Thread.sleep(Long.MAX_VALUE);
}
 
Example 30
Project: hbase   File: TestStripeCompactionPolicy.java    License: Apache License 2.0
@Test
public void testNothingToCompactFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 4);
  StripeCompactionPolicy.StripeInformationProvider si = createStripesL0Only(3, 10);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyNoCompaction(policy, si);

  si = createStripes(3, KEY_A);
  verifyNoCompaction(policy, si);
}
 
Example 31
Project: learning-hadoop   File: JavaHBaseBulkDeleteExample.java    License: Apache License 2.0
public static void main(String args[]) {
  if (args.length < 2) {
    System.out.println("JavaHBaseBulkDeleteExample {master} {tableName}");
    return;
  }

  String master = args[0];
  String tableName = args[1];

  JavaSparkContext jsc = new JavaSparkContext(master,
      "JavaHBaseBulkDeleteExample");
  jsc.addJar("SparkHBase.jar");

  List<byte[]> list = new ArrayList<byte[]>();
  list.add(Bytes.toBytes("1"));
  list.add(Bytes.toBytes("2"));
  list.add(Bytes.toBytes("3"));
  list.add(Bytes.toBytes("4"));
  list.add(Bytes.toBytes("5"));

  JavaRDD<byte[]> rdd = jsc.parallelize(list);

  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
  conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

  hbaseContext.bulkDelete(rdd, tableName, new DeleteFunction(), 4);

}
 
Example 32
Project: hbase   File: DefaultSourceFSConfigurationProvider.java    License: Apache License 2.0
@Override
public Configuration getConf(Configuration sinkConf, String replicationClusterId)
    throws IOException {
  if (sourceClustersConfs.get(replicationClusterId) == null) {
    synchronized (this.sourceClustersConfs) {
      if (sourceClustersConfs.get(replicationClusterId) == null) {
        LOG.info("Loading source cluster FS client conf for cluster " + replicationClusterId);
        // Load only user provided client configurations.
        Configuration sourceClusterConf = new Configuration(false);

        String replicationConfDir = sinkConf.get(HConstants.REPLICATION_CONF_DIR);
        if (replicationConfDir == null) {
          LOG.debug(HConstants.REPLICATION_CONF_DIR + " is not configured.");
          URL resource = HBaseConfiguration.class.getClassLoader().getResource("hbase-site.xml");
          if (resource != null) {
            String path = resource.getPath();
            replicationConfDir = path.substring(0, path.lastIndexOf("/"));
          } else {
            replicationConfDir = System.getenv("HBASE_CONF_DIR");
          }
        }

        File confDir = new File(replicationConfDir, replicationClusterId);
        LOG.info("Loading source cluster " + replicationClusterId
                + " file system configurations from xml "
                + "files under directory " + confDir);
        String[] listofConfFiles = FileUtil.list(confDir);
        for (String confFile : listofConfFiles) {
          if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) {
            // Add all the user provided client conf files
            sourceClusterConf.addResource(new Path(confDir.getPath(), confFile));
          }
        }
        this.sourceClustersConfs.put(replicationClusterId, sourceClusterConf);
      }
    }
  }
  return this.sourceClustersConfs.get(replicationClusterId);
}
 
Example 33
Project: hbase   File: TestStripeCompactionPolicy.java    License: Apache License 2.0
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // Test depends on this not being set to pass.  Default breaks test.  TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify the deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But cannot be dropped if there are.
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // Unless there are enough to cause L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(
      policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
      new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // even if L0 has no file
  // if all files of stripe aren't selected, delete must not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<HStoreFile> compactFile = new ArrayList<>();
  Iterator<HStoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
    compactFile.add(iter.next());
  }
  verifyCompaction(policy, si, compactFile, false, 1, null, si.getStartRow(0), si.getEndRow(0),
    true);
}
 
Example 34
Project: learning-hadoop   File: ExpressionFilterTest.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        throw new Exception("Table name or start key not specified.");
    }
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, args[0]);
    String startKey = args[1];
    
    Expression exp = ExpressionFactory.eq(ExpressionFactory
            .toLong(ExpressionFactory.toString(ExpressionFactory
                    .columnValue("family", "longStr2"))), ExpressionFactory
            .constant(Long.parseLong("99")));
    ExpressionFilter expressionFilter = new ExpressionFilter(exp);
    Scan scan = new Scan(Bytes.toBytes(startKey), expressionFilter);
    int count = 0;
    ResultScanner scanner = table.getScanner(scan);
    Result r = scanner.next();
    while (r != null) {
        count++;
        r = scanner.next();
    }
    System.out.println("++ Scanning finished with count : " + count + " ++");
    scanner.close();
    
}
 
Example 35
Project: hbase   File: Compressor.java    License: Apache License 2.0
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase)in).hasCompression();
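    // invert the setting: a compressed input WAL is rewritten uncompressed, and vice versa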
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
 
Example 36
Project: phoenix   File: PhoenixConfigurationUtil.java    License: Apache License 2.0
public static void loadHBaseConfiguration(Job job) throws IOException {
    // load hbase-site.xml
    Configuration hbaseConf = HBaseConfiguration.create();
    for (Map.Entry<String, String> entry : hbaseConf) {
        if (job.getConfiguration().get(entry.getKey()) == null) {
            job.getConfiguration().set(entry.getKey(), entry.getValue());
        }
    }
    //In order to have phoenix working on a secured cluster
    TableMapReduceUtil.initCredentials(job);
}
 
Example 37
Project: spliceengine   File: MockSnapshot.java    License: GNU Affero General Public License v3.0
public static Configuration mockHBaseConfiguration(List<Pair<String, String>> attr)
{
	Configuration conf = HBaseConfiguration.create();
	for(Pair<String, String> p: attr){
		conf.set(p.getFirst(), p.getSecond());
	}
	
	return conf;
}
 
Example 38
Project: hbase   File: ThriftServer.java    License: Apache License 2.0
/**
 * Start up the Thrift2 server.
 */
public static void main(String[] args) throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  // for now, only time we return is on an argument error.
  final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
  System.exit(status);
}
 
Example 39
Project: kylin   File: RemoteDictionaryStore.java    License: Apache License 2.0
static Connection getConnection() {
    Configuration conf = HBaseConfiguration.create(HadoopUtil.getCurrentConfiguration());
    try {
        return ConnectionFactory.createConnection(conf);
    } catch (IOException ioe) {
        throw new IllegalStateException("Cannot connect to HBase.", ioe);
    }
}
 
Example 40
Project: hbase   File: TestHeapMemoryManager.java    License: Apache License 2.0
@Test
public void testWhenClusterIsWriteHeavyWithOffheapMemstore() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf);
  MemstoreFlusherStub memStoreFlusher =
      new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  // Empty block cache but nearly filled memstore
  blockCache.setTestBlockSize(0);
  regionServerAccounting.setTestMemstoreSize((long) (maxHeapSize * 0.4 * 0.8));
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), regionServerAccounting);
  long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
  long oldBlockCacheSize = blockCache.maxSize;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // this should not change anything with onheap memstore
  memStoreFlusher.flushType = FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  // Allow the tuner to run once and make the necessary memory adjustments
  Thread.sleep(1500);
  // No changes should be made by tuner as we already have lot of empty space
  assertEquals(oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
  assertEquals(oldBlockCacheSize, blockCache.maxSize);
}