Java Code Examples for org.apache.hadoop.hbase.HConstants#HFILE_ARCHIVE_DIRECTORY

The following examples show how to use org.apache.hadoop.hbase.HConstants#HFILE_ARCHIVE_DIRECTORY. All examples are taken from the Apache HBase project; the source file for each snippet is noted above it.
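For orientation before the examples: HConstants.HFILE_ARCHIVE_DIRECTORY names the archive directory ("archive") under the HBase root directory, where HFiles are moved instead of being deleted outright. Below is a minimal sketch of the common pattern, assuming the usual imports (org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.HConstants, org.apache.hadoop.hbase.util.CommonFSUtils) and a configuration that points at an HBase root directory; it is not taken from the examples themselves.

// Minimal sketch: build the archive path under the HBase root directory.
Configuration conf = HBaseConfiguration.create();
Path rootDir = CommonFSUtils.getRootDir(conf);                            // e.g. hdfs://.../hbase
Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);  // e.g. hdfs://.../hbase/archive

The examples that follow use this same pattern, differing mainly in where the base directory comes from (test utilities, the master file system, or a backup root).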
Example 1
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testLargeSmallIsolation() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // no cleaner policies = delete all files
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  conf.setInt(HFileCleaner.HFILE_DELETE_THROTTLE_THRESHOLD, 512 * 1024);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  // setup the cleaner
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
  // clean up archive directory
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // necessary set up
  final int LARGE_FILE_NUM = 5;
  final int SMALL_FILE_NUM = 20;
  createFilesForTesting(LARGE_FILE_NUM, SMALL_FILE_NUM, fs, archivedHfileDir);
  // call cleanup
  cleaner.chore();

  Assert.assertEquals(LARGE_FILE_NUM, cleaner.getNumOfDeletedLargeFiles());
  Assert.assertEquals(SMALL_FILE_NUM, cleaner.getNumOfDeletedSmallFiles());
}
 
Example 2
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testThreadCleanup() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  // setup the cleaner
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
  // clean up archive directory
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // create a file to delete
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  // launch the chore
  cleaner.chore();
  // call cleanup
  cleaner.cleanup();
  // wait awhile for thread to die
  Thread.sleep(100);
  for (Thread thread : cleaner.getCleanerThreads()) {
    Assert.assertFalse(thread.isAlive());
  }
}
 
Example 3
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testRemovesEmptyDirectories() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // no cleaner policies = delete all files
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  Server server = new DummyServer();
  Path archivedHfileDir =
    new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  // setup the cleaner
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);

  // make all the directories for archiving files
  Path table = new Path(archivedHfileDir, "table");
  Path region = new Path(table, "regionsomthing");
  Path family = new Path(region, "fam");
  Path file = new Path(family, "file12345");
  fs.mkdirs(family);
  if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);
  fs.create(file).close();
  if (!fs.exists(file)) throw new RuntimeException("Test file didn't get created:" + file);

  // run the chore to cleanup the files (and the directories above it)
  cleaner.chore();

  // make sure all the parent directories get removed
  assertFalse("family directory not removed for empty directory", fs.exists(family));
  assertFalse("region directory not removed for empty directory", fs.exists(region));
  assertFalse("table directory not removed for empty directory", fs.exists(table));
  assertTrue("archive directory", fs.exists(archivedHfileDir));
}
 
Example 4
Source File: TestRestoreSnapshotHelper.java    From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  rootDir = TEST_UTIL.getDataTestDir("testRestore");
  archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  fs = TEST_UTIL.getTestFileSystem();
  conf = TEST_UTIL.getConfiguration();
  setupConf(conf);
  CommonFSUtils.setRootDir(conf, rootDir);
}
 
Example 5
Source File: SnapshotTestingUtils.java    From hbase with Apache License 2.0
public static void deleteArchiveDirectory(final HBaseTestingUtility util)
    throws IOException {
  // Ensure the archive directory is empty
  MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  mfs.getFileSystem().delete(archiveDir, true);
}
 
Example 6
Source File: TestSnapshotHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  CommonFSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = CommonFSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  RegionInfo mockRegion = RegionInfoBuilder.newBuilder(tableName).build();
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Example 7
Source File: RestoreTool.java    From hbase with Apache License 2.0
/**
 * return value represents a path of the form:
 * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
 * @param tableName table name
 * @return path to table archive
 * @throws IOException exception
 */
Path getTableArchivePath(TableName tableName) throws IOException {
  Path baseDir =
      new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
          HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
  Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
  Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
  if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
    LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
    tableArchivePath = null; // empty table has no archive
  }
  return tableArchivePath;
}
 
Example 8
Source File: TestSnapshotFromMaster.java    From hbase with Apache License 2.0
/**
 * Setup the config for the cluster
 */
@BeforeClass
public static void setupCluster() throws Exception {
  setupConf(UTIL.getConfiguration());
  UTIL.startMiniCluster(NUM_RS);
  fs = UTIL.getDFSCluster().getFileSystem();
  master = UTIL.getMiniHBaseCluster().getMaster();
  rootDir = master.getMasterFileSystem().getRootDir();
  archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}
 
Example 9
Source File: SnapshotScannerHDFSAclHelper.java    From hbase with Apache License 2.0
PathHelper(Configuration conf) {
  this.conf = conf;
  rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  tmpDataDir = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY),
      HConstants.BASE_NAMESPACE_DIR);
  dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
  mobDataDir = new Path(MobUtils.getMobHome(rootDir), HConstants.BASE_NAMESPACE_DIR);
  archiveDataDir = new Path(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
      HConstants.BASE_NAMESPACE_DIR);
  snapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
}
 
Example 10
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner," +
      "org.apache.hadoop.hbase.mob.ManualMobMaintHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
    new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveHFileCleaner),
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }

  assertEquals(33, fs.listStatus(archivedHfileDir).length);

  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);

  // run the chore
  cleaner.chore();

  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);

  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }

  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
 
Example 11
Source File: HFileContentValidator.java    From hbase with Apache License 2.0
/**
 * Check HFile contents are readable by HBase 2.
 *
 * @param conf used configuration
 * @return true if none of the checked HFiles are corrupted, false otherwise
 * @throws IOException if a remote or network exception occurs
 */
private boolean validateHFileContent(Configuration conf) throws IOException {
  FileSystem fileSystem = CommonFSUtils.getCurrentFileSystem(conf);

  ExecutorService threadPool = createThreadPool(conf);
  HFileCorruptionChecker checker;

  try {
    checker = new HFileCorruptionChecker(conf, threadPool, false);

    Path rootDir = CommonFSUtils.getRootDir(conf);
    LOG.info("Validating HFile contents under {}", rootDir);

    Collection<Path> tableDirs = FSUtils.getTableDirs(fileSystem, rootDir);
    checker.checkTables(tableDirs);

    Path archiveRootDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    LOG.info("Validating HFile contents under {}", archiveRootDir);

    List<Path> archiveTableDirs = FSUtils.getTableDirs(fileSystem, archiveRootDir);
    checker.checkTables(archiveTableDirs);
  } finally {
    threadPool.shutdown();

    try {
      threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  int checkedFiles = checker.getHFilesChecked();
  Collection<Path> corrupted = checker.getCorrupted();

  if (corrupted.isEmpty()) {
    LOG.info("Checked {} HFiles, none of them are corrupted.", checkedFiles);
    LOG.info("There are no incompatible HFiles.");

    return true;
  } else {
    LOG.info("Checked {} HFiles, {} are corrupted.", checkedFiles, corrupted.size());

    for (Path path : corrupted) {
      LOG.info("Corrupted file: {}", path);
    }

    LOG.info("Change data block encodings before upgrading. "
        + "Check https://s.apache.org/prefixtree for instructions.");

    return false;
  }
}
 
Example 12
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testOnConfigurationChange() throws Exception {
  // constants
  final int ORIGINAL_THROTTLE_POINT = 512 * 1024;
  final int ORIGINAL_QUEUE_INIT_SIZE = 512;
  final int UPDATE_THROTTLE_POINT = 1024; // small enough to change large/small check
  final int UPDATE_QUEUE_INIT_SIZE = 1024;
  final int LARGE_FILE_NUM = 5;
  final int SMALL_FILE_NUM = 20;
  final int LARGE_THREAD_NUM = 2;
  final int SMALL_THREAD_NUM = 4;
  final long THREAD_TIMEOUT_MSEC = 30 * 1000L;
  final long THREAD_CHECK_INTERVAL_MSEC = 500L;

  Configuration conf = UTIL.getConfiguration();
  // no cleaner policies = delete all files
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  conf.setInt(HFileCleaner.HFILE_DELETE_THROTTLE_THRESHOLD, ORIGINAL_THROTTLE_POINT);
  conf.setInt(HFileCleaner.LARGE_HFILE_QUEUE_INIT_SIZE, ORIGINAL_QUEUE_INIT_SIZE);
  conf.setInt(HFileCleaner.SMALL_HFILE_QUEUE_INIT_SIZE, ORIGINAL_QUEUE_INIT_SIZE);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  // setup the cleaner
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  final HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
  Assert.assertEquals(ORIGINAL_THROTTLE_POINT, cleaner.getThrottlePoint());
  Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getLargeQueueInitSize());
  Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getSmallQueueInitSize());
  Assert.assertEquals(HFileCleaner.DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC,
      cleaner.getCleanerThreadTimeoutMsec());
  Assert.assertEquals(HFileCleaner.DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
      cleaner.getCleanerThreadCheckIntervalMsec());

  // clean up archive directory and create files for testing
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  createFilesForTesting(LARGE_FILE_NUM, SMALL_FILE_NUM, fs, archivedHfileDir);

  // call cleaner, run as daemon to test the interrupt-at-middle case
  Thread t = new Thread() {
    @Override
    public void run() {
      cleaner.chore();
    }
  };
  t.setDaemon(true);
  t.start();
  // wait until file clean started
  while (cleaner.getNumOfDeletedSmallFiles() == 0) {
    Thread.yield();
  }

  // trigger configuration change
  Configuration newConf = new Configuration(conf);
  newConf.setInt(HFileCleaner.HFILE_DELETE_THROTTLE_THRESHOLD, UPDATE_THROTTLE_POINT);
  newConf.setInt(HFileCleaner.LARGE_HFILE_QUEUE_INIT_SIZE, UPDATE_QUEUE_INIT_SIZE);
  newConf.setInt(HFileCleaner.SMALL_HFILE_QUEUE_INIT_SIZE, UPDATE_QUEUE_INIT_SIZE);
  newConf.setInt(HFileCleaner.LARGE_HFILE_DELETE_THREAD_NUMBER, LARGE_THREAD_NUM);
  newConf.setInt(HFileCleaner.SMALL_HFILE_DELETE_THREAD_NUMBER, SMALL_THREAD_NUM);
  newConf.setLong(HFileCleaner.HFILE_DELETE_THREAD_TIMEOUT_MSEC, THREAD_TIMEOUT_MSEC);
  newConf.setLong(HFileCleaner.HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
      THREAD_CHECK_INTERVAL_MSEC);

  LOG.debug("File deleted from large queue: " + cleaner.getNumOfDeletedLargeFiles()
      + "; from small queue: " + cleaner.getNumOfDeletedSmallFiles());
  cleaner.onConfigurationChange(newConf);

  // check values after change
  Assert.assertEquals(UPDATE_THROTTLE_POINT, cleaner.getThrottlePoint());
  Assert.assertEquals(UPDATE_QUEUE_INIT_SIZE, cleaner.getLargeQueueInitSize());
  Assert.assertEquals(UPDATE_QUEUE_INIT_SIZE, cleaner.getSmallQueueInitSize());
  Assert.assertEquals(LARGE_THREAD_NUM + SMALL_THREAD_NUM, cleaner.getCleanerThreads().size());
  Assert.assertEquals(THREAD_TIMEOUT_MSEC, cleaner.getCleanerThreadTimeoutMsec());
  Assert.assertEquals(THREAD_CHECK_INTERVAL_MSEC, cleaner.getCleanerThreadCheckIntervalMsec());

  // make sure no cost when onConfigurationChange called with no change
  List<Thread> oldThreads = cleaner.getCleanerThreads();
  cleaner.onConfigurationChange(newConf);
  List<Thread> newThreads = cleaner.getCleanerThreads();
  Assert.assertArrayEquals(oldThreads.toArray(), newThreads.toArray());

  // wait until clean done and check
  t.join();
  LOG.debug("File deleted from large queue: " + cleaner.getNumOfDeletedLargeFiles()
      + "; from small queue: " + cleaner.getNumOfDeletedSmallFiles());
  Assert.assertTrue("Should delete more than " + LARGE_FILE_NUM
      + " files from large queue but actually " + cleaner.getNumOfDeletedLargeFiles(),
    cleaner.getNumOfDeletedLargeFiles() > LARGE_FILE_NUM);
  Assert.assertTrue("Should delete less than " + SMALL_FILE_NUM
      + " files from small queue but actually " + cleaner.getNumOfDeletedSmallFiles(),
    cleaner.getNumOfDeletedSmallFiles() < SMALL_FILE_NUM);
}
 
Example 13
Source File: SnapshotScannerHDFSAclHelper.java    From hbase with Apache License 2.0
Path getArchiveDir() {
  return new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}
 
Example 14
Source File: TestZooKeeperTableArchiveClient.java    From hbase with Apache License 2.0
private Path getArchiveDir() throws IOException {
  return new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
}
 
Example 15
Source File: TestHFileArchiving.java    From hbase with Apache License 2.0
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(CommonFSUtils.getTableDir(new Path("./"),
      TableName.valueOf(name.getMethodName())), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = getHFileCleaner(stoppable, conf, fs, archiveDir);
  assertNotNull("cleaner should not be null", cleaner);
  try {
    choreService.scheduleChore(cleaner);
    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir,  String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded, the file is no longer in the original location
        // but it's in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver is unable to archive the file. Probably HBASE-7643 race condition.
        // in this case, the file should not be archived, and we should have the file
        // in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid having this file in the next run
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
 
Example 16
Source File: HBaseInterClusterReplicationEndpoint.java    From hbase with Apache License 2.0
@Override
public void init(Context context) throws IOException {
  super.init(context);
  this.conf = HBaseConfiguration.create(ctx.getConfiguration());
  decorateConf();
  this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
  this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
      maxRetriesMultiplier);
  // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator
  // tasks to terminate when doStop() is called.
  long maxTerminationWaitMultiplier = this.conf.getLong(
      "replication.source.maxterminationmultiplier",
      DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER);
  this.maxTerminationWait = maxTerminationWaitMultiplier *
      this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = createConnection(this.conf);
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000);
  this.metrics = context.getMetrics();
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.replicationSinkMgr = createReplicationSinkManager(conn);
  // per sink thread pool
  this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
    HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
  this.exec = Threads.getBoundedCachedThreadPool(maxThreads, 60, TimeUnit.SECONDS,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build());
  this.abortable = ctx.getAbortable();
  // Set the size limit for replication RPCs to 95% of the max request size.
  // We could do with less slop if we have an accurate estimate of encoded size. Being
  // conservative for now.
  this.replicationRpcLimit = (int)(0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE,
    RpcServer.DEFAULT_MAX_REQUEST_SIZE));
  this.dropOnDeletedTables =
      this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
  this.dropOnDeletedColumnFamilies = this.conf
      .getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false);

  this.replicationBulkLoadDataEnabled =
      conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
        HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
  if (this.replicationBulkLoadDataEnabled) {
    replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID);
  }
  // Construct base namespace directory and hfile archive directory path
  Path rootDir = CommonFSUtils.getRootDir(conf);
  Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR);
  baseNamespaceDir = new Path(rootDir, baseNSDir);
  hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir));
  isSerial = context.getPeerConfig().isSerial();
}
 
Example 17
Source File: HFileArchiveUtil.java    From hbase with Apache License 2.0
/**
 * Get the full path to the archive directory on the configured
 * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
 * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
 *          the archive path)
 * @return the full {@link Path} to the archive directory, as defined by the configuration
 */
private static Path getArchivePath(final Path rootdir) {
  return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}