Java Code Examples for org.apache.hadoop.fs.FileSystem.createNewFile()

The following are Java code examples showing how to use the createNewFile() method of the org.apache.hadoop.fs.FileSystem class.
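
createNewFile(Path) creates the given path as a new zero-length file, returning true on success and false if the file already exists; other failures surface as IOException. As a quick orientation, here is a minimal sketch against the local filesystem (the class name and the path /tmp/example.txt are illustrative assumptions, not taken from any project below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNewFileDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Local filesystem for illustration; FileSystem.get(conf) would return
    // whatever fs.defaultFS points at (e.g. HDFS).
    FileSystem fs = FileSystem.getLocal(conf);

    Path path = new Path("/tmp/example.txt"); // hypothetical path
    System.out.println("created: " + fs.createNewFile(path)); // true on first call
    System.out.println("created: " + fs.createNewFile(path)); // false: file already exists
  }
}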
Example 1
Project: hadoop   File: TestFileInputFormat.java
public static List<Path> configureTestSimple(Configuration conf, FileSystem localFs)
    throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  localFs.mkdirs(base1);
  localFs.mkdirs(base2);

  Path in1File1 = new Path(base1, "file1");
  Path in1File2 = new Path(base1, "file2");
  localFs.createNewFile(in1File1);
  localFs.createNewFile(in1File2);

  Path in2File1 = new Path(base2, "file1");
  Path in2File2 = new Path(base2, "file2");
  localFs.createNewFile(in2File1);
  localFs.createNewFile(in2File2);
  List<Path> expectedPaths = Lists.newArrayList(in1File1, in1File2, in2File1,
      in2File2);
  return expectedPaths;
}
 
Example 2
Project: hadoop   File: TestFileInputFormat.java
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Example 3
Project: hadoop   File: TestContainerChecks.java
@Test
public void testContainerChecksWithSas() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("",
      EnumSet.of(CreateOptions.UseSas));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();

  // The container shouldn't be there
  assertFalse(container.exists());

  // A write should just fail
  try {
    fs.createNewFile(new Path("/foo"));
    assertFalse("Should've thrown.", true);
  } catch (AzureException ex) {
    // expected: the write must fail because the container does not exist
  }
  assertFalse(container.exists());
}
 
Example 4
Project: scheduling-connector-for-hadoop   File: PBSApplicationMaster.java
@Override
public ApplicationMasterRegisterResponse registerApplicationMaster(
    ApplicationMasterRegisterRequest request) throws IOException {
  String amHost = request.getHost();
  int amRpcPort = request.getPort();
  String trackingUrl = request.getTrackingUrl();
  
  int jobid = appAttemptId.getApplicationId().getId();

  String jobStatusFileName = jobid + "__" + amRpcPort + "__" + amHost + "__"
      + URLEncoder.encode(trackingUrl, HPCConfiguration.CHAR_ENCODING);
  String jobStatusLocation = conf.get(
      YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION,
      DEFAULT_YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION);
  FileSystem fileSystem = FileSystem.get(conf);
  Path statusFile = new Path(jobStatusLocation, jobStatusFileName);
  fileSystem.createNewFile(statusFile);
  fileSystem.deleteOnExit(statusFile);

  ApplicationMasterRegisterResponse response = new ApplicationMasterRegisterResponse();
  response.setMaxCapability(getMaxCapability());
  response.setQueue("default");
  return response;
}
 
Example 5
Project: ditb   File: TestHFileCleaner.java
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  Path file = new Path(root, "file");
  fs.createNewFile(file);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fs.exists(file));
  TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
  // update the time info for the file, so the cleaner removes it
  fs.setTimes(file, createTime - 100, -1);
  Configuration conf = UTIL.getConfiguration();
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  cleaner.setConf(conf);
  assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
      + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Example 6
Project: ditb   File: TestCoprocessorStop.java
@Override
public void stop(CoprocessorEnvironment env) throws IOException {
  String fileName = null;

  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    fileName = MASTER_FILE;
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    fileName = REGIONSERVER_FILE;
  } else if (env instanceof RegionCoprocessorEnvironment) {
    LOG.error("on RegionCoprocessorEnvironment!!");
  }

  Configuration conf = UTIL.getConfiguration();
  Path resultFile = new Path(UTIL.getDataTestDirOnTestFS(), fileName);
  FileSystem fs = FileSystem.get(conf);

  boolean result = fs.createNewFile(resultFile);
  LOG.info("create file " + resultFile + " return rc " + result);
}
 
Example 7
Project: alluxio   File: HdfsAndAlluxioUtils_update.java
/**
 * Creates a new file.
 * <p>
 * Whether Alluxio writes through to the underlying HDFS depends on its
 * configuration.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return whether the new file was created successfully
 */
public static boolean createNewFile(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		if (!fs.exists(uri)) {
			return fs.createNewFile(uri);
		}
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return false;
}
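
A note on the design above: the helper closes the FileSystem in its finally block. Since FileSystem.get() hands out a cached, shared instance by default, closing it can affect other code holding the same instance; per-call instances require disabling the cache (fs.<scheme>.impl.disable.cache) or using FileSystem.newInstance(). Whether this project's getFileSystem() does so is not shown here.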
 
Example 8
Project: hadoop   File: TestFileInputFormat.java
public static List<Path> configureTestNestedRecursive(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1).toString());
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inDir1 = new Path(base1, "dir1");
  Path inDir2 = new Path(base1, "dir2");
  Path inFile1 = new Path(base1, "file1");

  Path dir1File1 = new Path(inDir1, "file1");
  Path dir1File2 = new Path(inDir1, "file2");

  Path dir2File1 = new Path(inDir2, "file1");
  Path dir2File2 = new Path(inDir2, "file2");

  localFs.mkdirs(inDir1);
  localFs.mkdirs(inDir2);

  localFs.createNewFile(inFile1);
  localFs.createNewFile(dir1File1);
  localFs.createNewFile(dir1File2);
  localFs.createNewFile(dir2File1);
  localFs.createNewFile(dir2File2);

  List<Path> expectedPaths = Lists.newArrayList(inFile1, dir1File1,
      dir1File2, dir2File1, dir2File2);
  return expectedPaths;
}
 
Example 9
Project: hadoop   File: TestFileInputFormat.java
public static List<Path> configureTestNestedNonRecursive(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1).toString());
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      false);
  localFs.mkdirs(base1);

  Path inDir1 = new Path(base1, "dir1");
  Path inDir2 = new Path(base1, "dir2");
  Path inFile1 = new Path(base1, "file1");

  Path dir1File1 = new Path(inDir1, "file1");
  Path dir1File2 = new Path(inDir1, "file2");

  Path dir2File1 = new Path(inDir2, "file1");
  Path dir2File2 = new Path(inDir2, "file2");

  localFs.mkdirs(inDir1);
  localFs.mkdirs(inDir2);

  localFs.createNewFile(inFile1);
  localFs.createNewFile(dir1File1);
  localFs.createNewFile(dir1File2);
  localFs.createNewFile(dir2File1);
  localFs.createNewFile(dir2File2);

  List<Path> expectedPaths = Lists.newArrayList(inFile1, inDir1, inDir2);
  return expectedPaths;
}
 
Example 10
Project: ditb   File: ZKSplitLog.java
public static void markCorrupted(Path rootdir, String logFileName,
    FileSystem fs) {
  Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt");
  try {
    fs.createNewFile(file);
  } catch (IOException e) {
    LOG.warn("Could not flag a log file as corrupted. Failed to create " +
        file, e);
  }
}
 
Example 11
Project: ditb   File: TestSnapshotHFileCleaner.java
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Example 12
Project: ditb   File: TestHRegionFileSystem.java
@Test
public void testTempAndCommit() throws IOException {
  Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration conf = TEST_UTIL.getConfiguration();

  // Create a Region
  String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);

  // New region, no store files
  Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

  // Create a new file in temp (no files in the family)
  Path buildPath = regionFs.createTempName();
  fs.createNewFile(buildPath);
  storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

  // commit the file
  Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
  storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
  assertFalse(fs.exists(buildPath));

  fs.delete(rootDir, true);
}
 
Example 13
Project: ditb   File: TestDefaultCompactSelection.java
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  //Setting up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Example 14
Project: ditb   File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
 
Example 15
Project: ditb   File: TestHFileLinkCleaner.java
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableLinkName, hriLink.getEncodedName(), familyName);

  // Create an hfile in the archive: <archiveDir>/<tableName>/<regionEncodedName>/<familyName>/<hfileName>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create link to hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // Link backref cannot be removed
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Link backref can be removed
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
      FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // HFile can be removed
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
 
Example 16
Project: ditb   File: TestHFileCleaner.java
@Test(timeout = 60 * 1000)
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

  // Create 1 invalid file, 31 old files and 1 "recent" file (within the TTL)
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveHFileCleaner),
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }

  assertEquals(33, fs.listStatus(archivedHfileDir).length);

  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);

  // run the chore
  cleaner.chore();

  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);

  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }

  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
 
Example 17
Project: ditb   File: TestHFileArchiving.java
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir,  String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original
        // location, but it is in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably the HBASE-7643
        // race condition. In this case the file should not be archived, and we
        // should still have the file in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid having this file in the next iteration
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}