Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility.getConfiguration()

The following are Java code examples showing how to use the getConfiguration() method of the org.apache.hadoop.hbase.HBaseTestingUtility class.
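Across the examples below, the common pattern is the same: obtain the shared Configuration from HBaseTestingUtility.getConfiguration(), adjust properties on it before starting a mini cluster (or mini DFS/ZooKeeper cluster), and reuse the same object afterwards for client-side setup. The sketch below illustrates only that pattern; the field names (TEST_UTIL, connection) and the handler-count property are illustrative choices, not taken from any particular example.

@Before
public void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  // Tune the shared Configuration before the mini cluster starts, so the master,
  // region servers and DFS processes all pick up the values.
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 10); // illustrative value
  TEST_UTIL.startMiniCluster();
  // After startup, the same Configuration can be handed to client-side factories.
  connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
}
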
Example 1
Project: ditb   File: OfflineMetaRebuildTestCore.java
@Before
public void setUpBefore() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
  TEST_UTIL.startMiniCluster(3);
  conf = TEST_UTIL.getConfiguration();
  this.connection = ConnectionFactory.createConnection(conf);
  assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);

  // setup the table
  table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
  tableIdx++;
  htbl = setupTable(table);
  populateTable(htbl);
  assertEquals(5, scanMeta());
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  assertEquals(1, TEST_UTIL.getHBaseAdmin().listTables().length);
}
 
Example 2
Project: ditb   File: TestFSUtils.java
private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.set(HConstants.WAL_STORAGE_POLICY, policy);

  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  try {
    assertTrue(FSUtils.isHDFS(conf));

    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
    fs.mkdirs(testDir);

    FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);

    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);
    WriteDataToHDFS(fs, p, 4096);
    // will assert existence before deleting.
    cleanupFile(fs, testDir);
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Project: ditb   File: TestAccessControlFilter.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  enableSecurity(conf);
  verifyConfiguration(conf);

  // We expect 0.98 scanning semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);

  READER = User.createUserForTesting(conf, "reader", new String[0]);
  LIMITED = User.createUserForTesting(conf, "limited", new String[0]);
  DENIED = User.createUserForTesting(conf, "denied", new String[0]);
}
 
Example 4
Project: ditb   File: TestHRegionInfo.java
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  // Create a region.  That'll write the .regioninfo file.
  HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
    fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HRegion.closeHRegion(r);
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
    null, htu.getConfiguration());
  // Ensure the file is not written for a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  assertTrue(hri.equals(deserializedHri));
}
 
Example 5
Project: ditb   File: TestReplicationWALReaderManager.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(3);

  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
 
Example 6
Project: ditb   File: TestReplicationStateZKImpl.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  utility = new HBaseTestingUtility();
  utility.startMiniZKCluster();
  conf = utility.getConfiguration();
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
  replicationZNode = ZKUtil.joinZNode(zkw.baseZNode, replicationZNodeName);
  KEY_ONE = initPeerClusterState("/hbase1");
  KEY_TWO = initPeerClusterState("/hbase2");
}
 
Example 7
Project: ditb   File: TestTokenAuthentication.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  // register token type for protocol
  SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
    new SecurityInfo("hbase.test.kerberos.principal",
      AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  // security settings only added after startup so that ZK does not require SASL
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);
  server = new TokenServer(conf);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread, "TokenServer:"+server.getServerName().toString());
  // wait for startup
  while (!server.isStarted() && !server.isStopped()) {
    Thread.sleep(10);
  }
  server.rpcServer.refreshAuthManager(new PolicyProvider() {
    @Override
    public Service[] getServices() {
      return new Service [] {
        new Service("security.client.protocol.acl",
          AuthenticationProtos.AuthenticationService.BlockingInterface.class)};
    }
  });
  ZKClusterId.setClusterId(server.getZooKeeper(), clusterId);
  secretManager = (AuthenticationTokenSecretManager)server.getSecretManager();
  while(secretManager.getCurrentKey() == null) {
    Thread.sleep(1);
  }
}
 
Example 8
Project: ditb   File: TestZKSecretWatcher.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();

  ZooKeeperWatcher zk = newZK(conf, "server1", new MockAbortable());
  AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2];
  tmp[0] = new AuthenticationTokenSecretManagerForTest(
      conf, zk, "server1", 60*60*1000, 60*1000);
  tmp[0].start();

  zk = newZK(conf, "server2", new MockAbortable());
  tmp[1] = new AuthenticationTokenSecretManagerForTest(
      conf, zk, "server2", 60*60*1000, 60*1000);
  tmp[1].start();

  while (KEY_MASTER == null) {
    for (int i=0; i<2; i++) {
      if (tmp[i].isMaster()) {
        KEY_MASTER = tmp[i];
        KEY_SLAVE = tmp[ (i+1) % 2 ];
        break;
      }
    }
    Thread.sleep(500);
  }
  LOG.info("Master is "+KEY_MASTER.getName()+
      ", slave is "+KEY_SLAVE.getName());
}
 
Example 9
Project: ditb   File: TestFSUtils.java
@Test
public void testDeleteAndExists() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FileSystem fs = FileSystem.get(conf);
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // then that the correct file is created
  String file = UUID.randomUUID().toString();
  Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
  Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // delete the file with recursion as false. Only the file will be deleted.
    FSUtils.delete(fs, p, false);
    // Create another file
    FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
    out1.close();
    // delete the file with recursion as true. Still only the file will be deleted.
    FSUtils.delete(fs, p1, true);
    assertFalse("The created file should not be present", FSUtils.isExists(fs, p1));
    // and then cleanup
  } finally {
    FSUtils.delete(fs, p, true);
    FSUtils.delete(fs, p1, true);
  }
}
 
Example 10
Project: ditb   File: TableSnapshotInputFormatTestBase.java
protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
  String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
  throws Exception {
  try {
    util.deleteTable(tableName);
  } catch(Exception ex) {
    // ignore
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
    Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
 
Example 11
Project: ditb   File: TestRegionServerReportForDuty.java
@Before
public void setUp() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testUtil.startMiniZKCluster(1);
  testUtil.createRootDir();
  cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}
 
Example 12
Project: ditb   File: TestFSUtils.java
@Test
public void testRenameAndSetModifyTime() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();

  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  assertTrue(FSUtils.isHDFS(conf));

  FileSystem fs = FileSystem.get(conf);
  Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");

  String file = UUID.randomUUID().toString();
  Path p = new Path(testDir, file);

  FSDataOutputStream out = fs.create(p);
  out.close();
  assertTrue("The created file should be present", FSUtils.isExists(fs, p));

  long expect = System.currentTimeMillis() + 1000;
  assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());

  ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
  mockEnv.setValue(expect);
  EnvironmentEdgeManager.injectEdge(mockEnv);
  try {
    String dstFile = UUID.randomUUID().toString();
    Path dst = new Path(testDir , dstFile);

    assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
    assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
    assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));

    assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
    cluster.shutdown();
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 13
Project: ditb   File: TestRowCountEndpoint.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowCountEndpoint.class.getName());

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.createTable(TEST_TABLE, new byte[][]{TEST_FAMILY});
}
 
Example 14
Project: ditb   File: MockServer.java
/**
 * @param htu Testing utility to use
 * @param zkw If true, create a zkw.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
public MockServer(final HBaseTestingUtility htu, final boolean zkw)
throws ZooKeeperConnectionException, IOException {
  this.htu = htu;
  this.zk = zkw ?
    new ZooKeeperWatcher(htu.getConfiguration(), NAME.toString(), this, true) :
    null;
}
 
Example 15
Project: ditb   File: TestTableSnapshotInputFormat.java
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {

  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    Job job = new Job(util.getConfiguration());
    Scan scan = new Scan(startRow, endRow); // limit the scan

    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
      scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, true, tableDir);

    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);

    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
 
Example 16
Project: ditb   File: TestCleanerChore.java
/**
 * The cleaner runs in a loop, where it first checks to see whether all the files under a
 * directory can be deleted. If they all can, then we try to delete the directory. However, a
 * file may be added to that directory after the original check. This test ensures that we don't
 * accidentally delete that directory and don't get spurious IOExceptions.
 * <p>
 * This was from HBASE-7465.
 * @throws Exception on failure
 */
@Test
public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
  Stoppable stop = new StoppableImplementation();
  // need to use a local util here so we don't break the rest of the test, which runs on the
  // local FS and gets hosed when we start to use a minicluster.
  HBaseTestingUtility localUtil = new HBaseTestingUtility();
  Configuration conf = localUtil.getConfiguration();
  final Path testDir = UTIL.getDataTestDir();
  final FileSystem fs = UTIL.getTestFileSystem();
  LOG.debug("Writing test data to: " + testDir);
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);

  // create the directory layout in the directory to clean
  final Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  final Path racyFile = new Path(parent, "addedFile");

  // when we attempt to delete the original file, add another file in the same directory
  Mockito.doAnswer(new Answer<Boolean>() {
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      fs.create(racyFile).close();
      FSUtils.logFileSystemState(fs, testDir, LOG);
      return (Boolean) invocation.callRealMethod();
    }
  }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));

  // attempt to delete the directory; this should fail because a file is added mid-iteration
  if (chore.checkAndDeleteDirectory(parent)) {
    throw new Exception(
        "Reported success deleting directory, should have failed when adding file mid-iteration");
  }

  // make sure all the directories + added file exist, but the original file is deleted
  assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
  assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
  assertFalse("Original file unexpectedly retained", fs.exists(file));
  Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
}
 
Example 17
Project: ditb   File: TestMasterRestartAfterDisablingTable.java
@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
    throws Exception {
  final int NUM_MASTERS = 2;
  final int NUM_RS = 1;
  final int NUM_REGIONS_TO_CREATE = 4;

  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testmasterRestart", null);
  HMaster master = cluster.getMaster();

  // Create a table with regions
  TableName table = TableName.valueOf("tableRestart");
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = ht.getRegionLocator()) {
    numRegions = r.getStartKeys().length;
  }
  numRegions += 1; // catalogs
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(table);

  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals(
      "The number of regions for the table tableRestart should be 0 and only"
          + "the catalog and namespace tables should be present.", 2, regions.size());

  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
  } else {
    activeMaster = masterThreads.get(1);
  }
  activeMaster.getMaster().stop(
      "stopping the active master so that the backup can become active");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  cluster.waitForActiveAndReadyMaster();

  assertTrue("The table should not be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager().isTableState(
      TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
      ZooKeeperProtos.Table.State.DISABLING));
  log("Enabling table\n");
  // Need a new Admin, the previous one is on the old master
  Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  admin.enableTable(table);
  admin.close();
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The assigned regions were not onlined after master"
      + " switch except for the catalog and namespace tables.",
        6, regions.size());
  assertTrue("The table should be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager()
      .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Example 18
Project: ditb   File: TestMasterFailover.java
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;

  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);

  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
    cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }

  TEST_UTIL.shutdownMiniHBaseCluster();

  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
    HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
        metaRegion, metaRegionServer.getServerName());

  LOG.info("Staring cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);

  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);

  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Example 19
Project: ditb   File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request goes beyond the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example 20
Project: ditb   File: TestImportTsv.java
/**
 * Run an ImportTsv job and perform basic validation on the results.
 * Returns the ImportTsv <code>Tool</code> instance so that other tests can
 * inspect it for further validation as necessary. This method is static to
 * ensure non-reliance on the instance's util/conf facilities.
 * @param args Any arguments to pass BEFORE inputFile path is appended.
 * @return The Tool instance used to run the test.
 */
protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data,
    String[] args, int valueMultiplier, int expectedKVCount)
throws Exception {
  String table = args[args.length - 1];
  Configuration conf = new Configuration(util.getConfiguration());

  // populate input file
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  if (data == null) {
    data = "KEY\u001bVALUE1\u001bVALUE2\n";
  }
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));

  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  }

  // run the import
  List<String> argv = new ArrayList<String>(Arrays.asList(args));
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));

  // Perform basic validation. If the input args did not include
  // ImportTsv.BULK_OUTPUT_CONF_KEY then validate data in the table.
  // Otherwise, validate presence of hfiles.
  boolean createdHFiles = false;
  String outputPath = null;
  for (String arg : argv) {
    if (arg.contains(ImportTsv.BULK_OUTPUT_CONF_KEY)) {
      createdHFiles = true;
      // split '-Dfoo=bar' on '=' and keep 'bar'
      outputPath = arg.split("=")[1];
      break;
    }
  }

  if (createdHFiles)
    validateHFiles(fs, outputPath, family, expectedKVCount);
  else
    validateTable(conf, TableName.valueOf(table), family, valueMultiplier);

  if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) {
    LOG.debug("Deleting test subdirectory");
    util.cleanupDataTestDirOnTestFS(table);
  }
  return tool;
}