Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#getConfiguration()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#getConfiguration(). They are taken from open source projects; each example notes the project and source file the original code comes from.
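Before the project-specific examples, here is a minimal sketch of the common pattern (not taken from any of the projects below; the property tweaked and the test body are illustrative assumptions only): the Configuration returned by getConfiguration() backs the mini cluster, so it is typically adjusted before startMiniCluster() is called, and the cluster is shut down afterwards.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class GetConfigurationSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // The returned Configuration is the one the mini cluster will use,
    // so tune it before starting the cluster.
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.regionserver.handler.count", 10); // illustrative tweak
    util.startMiniCluster();
    try {
      // ... exercise util.getConnection() / util.getAdmin() here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}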
Example 1
Source File: TestReplicationEditsDroppedWithDeletedTableCFs.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Set to true to filter replication edits for dropped tables
  conf1.setBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, true);
  conf1.set(ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.nb.capacity", 1);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  conf1 = utility1.getConfiguration();

  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(ZOOKEEPER_ZNODE_PARENT, "/2");
  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  utility1.startMiniCluster(1);
  utility2.startMiniCluster(1);

  admin1 = utility1.getAdmin();
  admin2 = utility2.getAdmin();
}
 
Example 2
Source File: TestReplicationStuckWithDroppedTable.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.set(ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.nb.capacity", 1);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  conf1 = utility1.getConfiguration();

  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(ZOOKEEPER_ZNODE_PARENT, "/2");
  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  utility1.startMiniCluster(1);
  utility2.startMiniCluster(1);

  admin1 = utility1.getAdmin();
  admin2 = utility2.getAdmin();
}
 
Example 3
Source File: TestMobCompactionWithDefaults.java    From hbase with Apache License 2.0
@BeforeClass
public static void htuStart() throws Exception {
  HTU = new HBaseTestingUtility();
  conf = HTU.getConfiguration();
  conf.setInt("hfile.format.version", 3);
  // Disable automatic MOB compaction
  conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0);
  // Disable automatic MOB file cleaner chore
  conf.setLong(MobConstants.MOB_CLEANER_PERIOD, 0);
  // Set minimum age to archive to 10 sec
  conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive);
  // Set compacted file discharger interval to half of minAgeToArchive
  conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive / 2);
  conf.setBoolean("hbase.regionserver.compaction.enabled", false);
  HTU.startMiniCluster();
}
 
Example 4
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example 5
Source File: TestServerRemoteProcedure.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  util = new HBaseTestingUtility();
  this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
      .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build());
  master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers);
  rsDispatcher = new MockRSProcedureDispatcher(master);
  rsDispatcher.setMockRsExecutor(new NoopRSExecutor());
  master.start(2, rsDispatcher);
  am = master.getAssignmentManager();
  master.getServerManager().getOnlineServersList().stream()
      .forEach(serverName -> am.getRegionStates().getOrCreateServer(serverName));
}
 
Example 6
Source File: TransactionAwareHTableTest.java    From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();

  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
  conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());

  conf.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5);
  
  // Tune down the connection thread pool size
  conf.setInt("hbase.hconnection.threads.core", 5);
  conf.setInt("hbase.hconnection.threads.max", 10);
  // Tune down handler threads in the regionserver
  conf.setInt("hbase.regionserver.handler.count", 10);

  // Set to random port
  conf.setInt("hbase.master.port", 0);
  conf.setInt("hbase.master.info.port", 0);
  conf.setInt("hbase.regionserver.port", 0);
  conf.setInt("hbase.regionserver.info.port", 0);

  testUtil.startMiniCluster();
  hBaseAdmin = testUtil.getHBaseAdmin();
  conn = testUtil.getConnection();
  txStateStorage = new HDFSTransactionStateStorage(conf, new SnapshotCodecProvider(conf), new TxMetricsCollector());
  txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();
}
 
Example 7
Source File: EmbeddedHbase.java    From Eagle with Apache License 2.0
public void start() {
  try {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setInt("test.hbase.zookeeper.property.clientPort", port);
    conf.set("zookeeper.znode.parent", znode);
    conf.setInt("hbase.zookeeper.property.maxClientCnxns", 200);
    conf.setInt("hbase.master.info.port", -1); // avoid port clobbering
    // start mini hbase cluster
    hBaseCluster = util.startMiniCluster();
    Configuration config = hBaseCluster.getConf();

    config.set("zookeeper.session.timeout", "120000");
    config.set("hbase.zookeeper.property.tickTime", "6000");
    config.set(HConstants.HBASE_CLIENT_PAUSE, "3000");
    config.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "1");
    config.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, "60000");

    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        shutdown();
      }
    });
  } catch (Throwable t) {
    LOG.error("Got an exception: ", t);
  }
}
 
Example 8
Source File: TestFailedAppendAndSync.java    From hbase with Apache License 2.0
@Before
public void setup() throws IOException {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  // Disable block cache.
  CONF.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  dir = TEST_UTIL.getDataTestDir("TestHRegion").toString();
  tableName = TableName.valueOf(name.getMethodName());
}
 
Example 9
Source File: MockServer.java    From hbase with Apache License 2.0
/**
 * @param htu Testing utility to use
 * @param zkw If true, create a zkw.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
public MockServer(final HBaseTestingUtility htu, final boolean zkw)
    throws ZooKeeperConnectionException, IOException {
  this.htu = htu;
  this.zk = zkw ?
    new ZKWatcher(htu.getConfiguration(), NAME.toString(), this, true) :
    null;
}
 
Example 10
Source File: TestBlockReorderMultiBlocks.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024); // For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
 
Example 11
Source File: TestBulkLoadReplication.java    From hbase with Apache License 2.0
protected void bulkLoadOnCluster(TableName tableName, byte[] row, byte[] value,
                               HBaseTestingUtility cluster) throws Exception {
  String bulkLoadFilePath = createHFileForFamilies(row, value, cluster.getConfiguration());
  copyToHdfs(bulkLoadFilePath, cluster.getDFSCluster());
  BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(cluster.getConfiguration());
  bulkLoadHFilesTool.bulkLoad(tableName, BULK_LOAD_BASE_DIR);
}
 
Example 12
Source File: TestRecoveredEditsReplayAndAbort.java    From hbase with Apache License 2.0
@Before
public void setup() throws IOException {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  method = name.getMethodName();
  tableName = TableName.valueOf(method);
}
 
Example 13
Source File: ConnectionUtilIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void setUp() throws Exception {
    hbaseTestUtil = new HBaseTestingUtility();
    conf = hbaseTestUtil.getConfiguration();
    setUpConfigForMiniCluster(conf);
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase-test");
    hbaseTestUtil.startMiniCluster();
    Class.forName(PhoenixDriver.class.getName());
}
 
Example 14
Source File: TransactionAwareHTableTest.java    From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();

  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
  conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());

  conf.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5);

  // Tune down the connection thread pool size
  conf.setInt("hbase.hconnection.threads.core", 5);
  conf.setInt("hbase.hconnection.threads.max", 10);
  // Tune down handler threads in the regionserver
  conf.setInt("hbase.regionserver.handler.count", 10);

  // Set to random port
  conf.setInt("hbase.master.port", 0);
  conf.setInt("hbase.master.info.port", 0);
  conf.setInt("hbase.regionserver.port", 0);
  conf.setInt("hbase.regionserver.info.port", 0);

  testUtil.startMiniCluster();
  hBaseAdmin = testUtil.getHBaseAdmin();
  txStateStorage =
      new HDFSTransactionStateStorage(conf, new SnapshotCodecProvider(conf),
          new TxMetricsCollector());
  txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();
}
 
Example 15
Source File: TestRegionServerReportForDuty.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testUtil.startMiniZKCluster(1);
  testUtil.createRootDir();
  cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}
 
Example 16
Source File: TestBlocksScanned.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  testDir = TEST_UTIL.getDataTestDir("TestBlocksScanned");
}
 
Example 17
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a read goes beyond the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example 18
Source File: BalanceBooksTest.java    From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
  conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());

  // Tune down the connection thread pool size
  conf.setInt("hbase.hconnection.threads.core", 5);
  conf.setInt("hbase.hconnection.threads.max", 10);
  // Tune down handler threads in the regionserver
  conf.setInt("hbase.regionserver.handler.count", 10);

  // Set to random port
  conf.setInt("hbase.master.port", 0);
  conf.setInt("hbase.master.info.port", 0);
  conf.setInt("hbase.regionserver.port", 0);
  conf.setInt("hbase.regionserver.info.port", 0);

  testUtil.startMiniCluster();

  String zkClusterKey = testUtil.getClusterKey(); // hostname:clientPort:parentZnode
  String zkQuorum = zkClusterKey.substring(0, zkClusterKey.lastIndexOf(':'));
  LOG.info("Zookeeper Quorum is running at {}", zkQuorum);
  conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkQuorum);

  Injector injector = Guice.createInjector(
      new ConfigModule(conf),
      new ZKModule(),
      new DiscoveryModules().getDistributedModules(),
      Modules.override(new TransactionModules().getDistributedModules())
          .with(new AbstractModule() {
            @Override
            protected void configure() {
              bind(TransactionStateStorage.class).to(InMemoryTransactionStateStorage.class).in(Scopes.SINGLETON);
            }
          }),
      new TransactionClientModule()
  );

  zkClientService = injector.getInstance(ZKClientService.class);
  zkClientService.startAndWait();

  // start a tx server
  txService = injector.getInstance(TransactionService.class);
  try {
    LOG.info("Starting transaction service");
    txService.startAndWait();
  } catch (Exception e) {
    LOG.error("Failed to start service: ", e);
    throw e;
  }

  Tests.waitForTxReady(injector.getInstance(TransactionSystemClient.class));
}
 
Example 19
Source File: ConnectionQueryServicesTestImpl.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private Configuration setupServer(Configuration config) throws Exception {
    util = new HBaseTestingUtility(config);
    util.startMiniCluster();
    return util.getConfiguration();
}
 
Example 20
Source File: MutableIndexReplicationIT.java    From phoenix with Apache License 2.0
private static void setupConfigsAndStartCluster() throws Exception {
        // cluster-1 lives at regular HBase home, so we don't need to change how phoenix handles
        // lookups
//        conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
        // smaller log roll size to trigger more events
        setUpConfigForMiniCluster(conf1);
        conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
        conf1.setInt("replication.source.size.capacity", 10240);
        conf1.setLong("replication.source.sleepforretries", 100);
        conf1.setInt("hbase.regionserver.maxlogs", 10);
        conf1.setLong("hbase.master.logcleaner.ttl", 10);
        conf1.setInt("zookeeper.recovery.retry", 1);
        conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
        conf1.setBoolean("dfs.support.append", true);
        conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
        conf1.setInt("replication.stats.thread.period.seconds", 5);
        conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

        utility1 = new HBaseTestingUtility(conf1);
        utility1.startMiniZKCluster();
        MiniZooKeeperCluster miniZK = utility1.getZkCluster();
        // Have to reset conf1 in case zk cluster location different
        // than default
        conf1 = utility1.getConfiguration();
        zkw1 = new ZKWatcher(conf1, "cluster1", null, true);
        admin = ConnectionFactory.createConnection(conf1).getAdmin();
        LOGGER.info("Setup first Zk");

        // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
        conf2 = HBaseConfiguration.create(conf1);
        conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
        conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
        conf2.setBoolean("dfs.support.append", true);
        conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

        utility2 = new HBaseTestingUtility(conf2);
        utility2.setZkCluster(miniZK);
        zkw2 = new ZKWatcher(conf2, "cluster2", null, true);

        LOGGER.info("Setup second Zk");
        utility1.startMiniCluster(2);
        utility2.startMiniCluster(2);
        // replicate from cluster 1 -> cluster 2, but not back again
        admin.addReplicationPeer("1", new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey()));
    }