Java Code Examples for org.apache.hadoop.fs.FileSystem#createNewFile()

The following examples show how to use org.apache.hadoop.fs.FileSystem#createNewFile(). Each example is drawn from an open-source project; the source file and license are noted above each snippet.
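Before the project examples, a minimal, self-contained sketch of the method's contract may help (the class name and path below are illustrative, not from any of the projects): createNewFile() creates a zero-length file at the given Path and returns true only if the file did not already exist, so callers can act on the return value instead of a separate exists() check.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNewFileDemo {
  public static void main(String[] args) throws IOException {
    // Obtain the FileSystem backing the default scheme (fs.defaultFS),
    // e.g. hdfs:// on a cluster or file:// locally.
    FileSystem fs = FileSystem.get(new Configuration());

    Path marker = new Path("/tmp/example-marker");  // illustrative path
    // createNewFile() writes a zero-length file; the boolean result says
    // whether this call created it (true) or it already existed (false).
    if (fs.createNewFile(marker)) {
      System.out.println("Created " + marker);
    } else {
      System.out.println(marker + " already exists");
    }
  }
}

Several of the examples below follow this pattern, either guarding the call with fs.exists() (Example 1) or checking the returned boolean and failing loudly when it is false (Examples 7 and 13).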
Example 1
Source File: IndexUpdateOutputFormat.java    From RDFS with Apache License 2.0
public RecordWriter<Shard, Text> getRecordWriter(final FileSystem fs,
    JobConf job, String name, final Progressable progress)
    throws IOException {

  final Path perm = new Path(getWorkOutputPath(job), name);

  return new RecordWriter<Shard, Text>() {
    public void write(Shard key, Text value) throws IOException {
      assert (IndexUpdateReducer.DONE.equals(value));

      String shardName = key.getDirectory();
      shardName = shardName.replace("/", "_");

      Path doneFile =
          new Path(perm, IndexUpdateReducer.DONE + "_" + shardName);
      if (!fs.exists(doneFile)) {
        fs.createNewFile(doneFile);
      }
    }

    public void close(final Reporter reporter) throws IOException {
    }
  };
}
 
Example 2
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testManualMobCleanerLetsNonMobGo() throws IOException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  TableName table = TableName.valueOf("testManualMobCleanerLetsNonMobGo");
  Path nonmob = HFileArchiveUtil.getRegionArchiveDir(root, table,
    RegionInfoBuilder.newBuilder(table).build().getEncodedName());
Path family = new Path(nonmob, "family");

  Path file = new Path(family, "someHFileThatWouldBeAUUID");
  fs.createNewFile(file);
  assertTrue("Test file not created!", fs.exists(file));

  ManualMobMaintHFileCleaner cleaner = new ManualMobMaintHFileCleaner();

  assertTrue("Non-Mob File should have been deletable. check path. '"+file+"'",
      cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Example 3
Source File: TestContainerChecks.java    From big-c with Apache License 2.0
@Test
public void testContainerChecksWithSas() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("",
      EnumSet.of(CreateOptions.UseSas));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();

  // The container shouldn't be there
  assertFalse(container.exists());

  // A write should just fail
  try {
    fs.createNewFile(new Path("/foo"));
    assertFalse("Should've thrown.", true);
  } catch (AzureException ex) {
    // expected: without the container, the write cannot succeed
  }
  assertFalse(container.exists());
}
 
Example 4
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testManualMobCleanerStopsMobRemoval() throws IOException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  TableName table = TableName.valueOf("testManualMobCleanerStopsMobRemoval");
  Path mob = HFileArchiveUtil.getRegionArchiveDir(root, table,
      MobUtils.getMobRegionInfo(table).getEncodedName());
Path family = new Path(mob, "family");

  Path file = new Path(family, "someHFileThatWouldBeAUUID");
  fs.createNewFile(file);
  assertTrue("Test file not created!", fs.exists(file));

  ManualMobMaintHFileCleaner cleaner = new ManualMobMaintHFileCleaner();

  assertFalse("Mob File shouldn't have been deletable. check path. '"+file+"'",
      cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Example 5
Source File: GsonBigQueryInputFormatTest.java    From hadoop-connectors with Apache License 2.0
/**
 * Tests the cleanupJob method of GsonBigQueryInputFormat with intermediate delete but no sharded
 * export.
 */
@Test
public void testCleanupJobWithIntermediateDeleteNoShardedExport()
    throws IOException {
  config.setBoolean(BigQueryConfiguration.DELETE_EXPORT_FILES_FROM_GCS.getKey(), true);

  // GCS cleanup should still happen.
  Path tempPath = new Path(BigQueryConfiguration.TEMP_GCS_PATH.get(config, config::get));
  FileSystem fs = tempPath.getFileSystem(config);
  fs.mkdirs(tempPath);
  Path dataFile = new Path(tempPath.toString() + "/data-00000.json");
  fs.createNewFile(dataFile);
  assertThat(fs.exists(tempPath)).isTrue();
  assertThat(fs.exists(dataFile)).isTrue();

  // Run method and verify calls.
  GsonBigQueryInputFormat.cleanupJob(mockBigQueryHelper, config);

  assertThat(fs.exists(tempPath)).isFalse();
  assertThat(fs.exists(dataFile)).isFalse();

  verify(mockBigQueryHelper, times(1)).getTable(eq(tableRef));
  verifyNoMoreInteractions(mockBigquery);
}
 
Example 6
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  Path file = new Path(root, "file");
  fs.createNewFile(file);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fs.exists(file));
  TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
  // update the time info for the file, so the cleaner removes it
  fs.setTimes(file, createTime - 100, -1);
  Configuration conf = UTIL.getConfiguration();
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  cleaner.setConf(conf);
  assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
      + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Example 7
Source File: Trash.java    From incubator-gobblin with Apache License 2.0
protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation) throws IOException {
  if (fs.exists(trashLocation)) {
    if (!fs.isDirectory(trashLocation)) {
      throw new IOException(String.format("Trash location %s is not a directory.", trashLocation));
    }

    if (!fs.exists(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
      // If trash identifier file is not present, directory might have been created by user.
      // Add trash identifier file only if directory is empty.
      if (fs.listStatus(trashLocation).length > 0) {
        throw new IOException(String.format("Trash directory %s exists, but it does not look like a trash directory. "
            + "File: %s missing and directory is not empty.", trashLocation, TRASH_IDENTIFIER_FILE));
      } else if (!fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
        throw new IOException(String.format("Failed to create file %s in existing trash directory %s.",
            TRASH_IDENTIFIER_FILE, trashLocation));
      }
    }
  } else if (!(safeFsMkdir(fs, trashLocation.getParent(), ALL_PERM) && safeFsMkdir(fs, trashLocation, PERM)
      && fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE)))) {
    // Failed to create directory or create trash identifier file.
    throw new IOException("Failed to create trash directory at " + trashLocation.toString());
  }
}
 
Example 8
Source File: TestFileInputFormat.java    From big-c with Apache License 2.0
public static List<Path> configureTestSimple(Configuration conf, FileSystem localFs)
    throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  localFs.mkdirs(base1);
  localFs.mkdirs(base2);

  Path in1File1 = new Path(base1, "file1");
  Path in1File2 = new Path(base1, "file2");
  localFs.createNewFile(in1File1);
  localFs.createNewFile(in1File2);

  Path in2File1 = new Path(base2, "file1");
  Path in2File2 = new Path(base2, "file2");
  localFs.createNewFile(in2File1);
  localFs.createNewFile(in2File2);
  List<Path> expectedPaths = Lists.newArrayList(in1File1, in1File2, in2File1,
      in2File2);
  return expectedPaths;
}
 
Example 9
Source File: MetricsFileSystemInstrumentationTest.java    From incubator-gobblin with Apache License 2.0
/**
 * This test is disabled because it requires a local HDFS cluster at localhost:9000, which must be
 * installed and set up separately.
 * Changes to {@link MetricsFileSystemInstrumentation} should be followed by a manual run of this test.
 *
 * TODO: figure out how to fully automate this test.
 * @throws Exception
 */
@Test(enabled = false)
public void test() throws Exception {

  String uri = "instrumented-hdfs://localhost:9000";

  FileSystem fs = FileSystem.get(new URI(uri), new Configuration());

  String name = UUID.randomUUID().toString();
  fs.mkdirs(new Path("/tmp"));

  // Test absolute paths
  Path absolutePath = new Path("/tmp", name);
  Assert.assertFalse(fs.exists(absolutePath));
  fs.createNewFile(absolutePath);
  Assert.assertTrue(fs.exists(absolutePath));
  Assert.assertEquals(fs.getFileStatus(absolutePath).getLen(), 0);
  fs.delete(absolutePath, false);
  Assert.assertFalse(fs.exists(absolutePath));


  // Test fully qualified paths
  Path fqPath = new Path(uri + "/tmp", name);
  Assert.assertFalse(fs.exists(fqPath));
  fs.createNewFile(fqPath);
  Assert.assertTrue(fs.exists(fqPath));
  Assert.assertEquals(fs.getFileStatus(fqPath).getLen(), 0);
  fs.delete(fqPath, false);
  Assert.assertFalse(fs.exists(fqPath));
}
 
Example 10
Source File: ZKSplitLog.java    From hbase with Apache License 2.0
public static void markCorrupted(Path rootdir, String logFileName,
    FileSystem fs) {
  Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt");
  try {
    fs.createNewFile(file);
  } catch (IOException e) {
    LOG.warn("Could not flag a log file as corrupted. Failed to create " +
        file, e);
  }
}
 
Example 11
Source File: LockUtil.java    From anthelion with Apache License 2.0
/**
 * Create a lock file.
 * @param fs filesystem
 * @param lockFile name of the lock file
 * @param accept if true, and the target file exists, consider it valid. If false
 * and the target file exists, throw an IOException.
 * @throws IOException if accept is false, and the target file already exists,
 * or if it's a directory.
 */
public static void createLockFile(FileSystem fs, Path lockFile, boolean accept) throws IOException {
  if (fs.exists(lockFile)) {
    if(!accept)
      throw new IOException("lock file " + lockFile + " already exists.");
    if (fs.getFileStatus(lockFile).isDir())
      throw new IOException("lock file " + lockFile + " already exists and is a directory.");
    // do nothing - the file already exists.
  } else {
    // make sure parents exist
    fs.mkdirs(lockFile.getParent());
    fs.createNewFile(lockFile);
  }
}
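A hypothetical caller might use this helper as follows (the method, configuration, and lock path are illustrative, and removeLockFile is assumed to be the class's companion helper for releasing the lock). Note that the exists()/createNewFile() pair above is check-then-act rather than atomic: two processes racing past the exists() check can both reach createNewFile(), and the helper ignores its boolean result, so exactly-once locking is not guaranteed.

// Hypothetical usage sketch; names and paths are illustrative.
public static void updateCrawlDb(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path lock = new Path("/data/crawldb/.locked");

  // accept = true tolerates a lock file left behind by a previous run.
  LockUtil.createLockFile(fs, lock, true);
  try {
    // ... perform the update guarded by the lock ...
  } finally {
    LockUtil.removeLockFile(fs, lock);  // assumed companion helper in LockUtil
  }
}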
 
Example 12
Source File: TestFileInputFormat.java    From big-c with Apache License 2.0
public static List<Path> configureTestNestedRecursive(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1).toString());
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inDir1 = new Path(base1, "dir1");
  Path inDir2 = new Path(base1, "dir2");
  Path inFile1 = new Path(base1, "file1");

  Path dir1File1 = new Path(inDir1, "file1");
  Path dir1File2 = new Path(inDir1, "file2");

  Path dir2File1 = new Path(inDir2, "file1");
  Path dir2File2 = new Path(inDir2, "file2");

  localFs.mkdirs(inDir1);
  localFs.mkdirs(inDir2);

  localFs.createNewFile(inFile1);
  localFs.createNewFile(dir1File1);
  localFs.createNewFile(dir1File2);
  localFs.createNewFile(dir2File1);
  localFs.createNewFile(dir2File2);

  List<Path> expectedPaths = Lists.newArrayList(inFile1, dir1File1,
      dir1File2, dir2File1, dir2File2);
  return expectedPaths;
}
 
Example 13
Source File: HoodieTestUtils.java    From hudi with Apache License 2.0
public static void createCompactionCommitFiles(FileSystem fs, String basePath, String... instantTimes)
    throws IOException {
  for (String instantTime : instantTimes) {
    boolean createFile = fs.createNewFile(new Path(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/"
        + HoodieTimeline.makeCommitFileName(instantTime)));
    if (!createFile) {
      throw new IOException("cannot create commit file for commit " + instantTime);
    }
  }
}
 
Example 14
Source File: TestCompactionPolicy.java    From hbase with Apache License 2.0
/**
 * Setting up a Store
 * @throws IOException with error
 */
protected void initialize() throws IOException {
  Path basedir = new Path(DIR);
  String logName = "logs";
  Path logdir = new Path(DIR, logName);
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(
      TableName.valueOf(Bytes.toBytes("table")));
  tableDescriptor.setColumnFamily(familyDescriptor);
  RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();

  hlog = new FSHLog(fs, basedir, logName, conf);
  hlog.init();
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
  region.close();
  Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
  region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);

  store = new HStore(region, familyDescriptor, conf, false);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Example 15
Source File: TestSnapshotHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  CommonFSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = CommonFSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  RegionInfo mockRegion = RegionInfoBuilder.newBuilder(tableName).build();
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Example 16
Source File: StorageStrategy.java    From Bats with Apache License 2.0
/**
 * Creates the passed file on the appropriate file system.
 * Before creation, checks which parent directories do not exist.
 * Applies storage strategy rules to all newly created directories and to the file.
 * Returns the first created parent path, or the file itself if no new parent paths were created.
 *
 * Case 1: /a/b -> already exists, attempt to create /a/b/c/some_file.txt
 * Will create file and return /a/b/c.
 * Case 2: /a/b/c -> already exists, attempt to create /a/b/c/some_file.txt
 * Will create file and return /a/b/c/some_file.txt.
 * Case 3: /a/b/c/some_file.txt -> already exists, will fail.
 *
 * @param fs file system where the file should be located
 * @param file file path
 * @return first created parent path, or the file if no new parent paths were created
 * @throws IOException in case of problems while creating the path, setting permissions,
 *         or adding the path to the delete-on-exit list
 */
public Path createFileAndApply(FileSystem fs, Path file) throws IOException {
  List<Path> locations = getNonExistentLocations(fs, file.getParent());
  if (!fs.createNewFile(file)) {
    throw new IOException(String.format("File [%s] already exists on file system [%s].",
        file.toUri().getPath(), fs.getUri()));
  }
  applyToFile(fs, file);

  if (locations.isEmpty()) {
    return file;
  }

  for (Path location : locations) {
    applyStrategy(fs, location, getFolderPermission(), deleteOnExit);
  }
  return locations.get(locations.size() - 1);
}
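A brief usage sketch under stated assumptions (a configured StorageStrategy instance named strategy, and the Case 1 layout above where only /a/b exists; all names are illustrative): the return value is the topmost path the call brought into existence, which is the natural unit to remove when rolling back an abandoned write.

// Assuming /a/b already exists on fs (names are illustrative):
Path firstCreated = strategy.createFileAndApply(fs, new Path("/a/b/c/some_file.txt"));
// firstCreated is /a/b/c; removing it recursively undoes everything this
// call created while leaving the pre-existing /a/b untouched.
fs.delete(firstCreated, true);  // illustrative rollback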
 
Example 17
Source File: TestHFileCleaner.java    From hbase with Apache License 2.0
@Test
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner," +
      "org.apache.hadoop.hbase.mob.ManualMobMaintHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
    new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);

  // Create 1 invalid file, 31 old files and 1 "recent" file
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveHFileCleaner),
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }

  assertEquals(33, fs.listStatus(archivedHfileDir).length);

  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);

  // run the chore
  cleaner.chore();

  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);

  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }

  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
 
Example 18
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
public void testExpiryMarkerIdentification() throws Exception {
  // expired hoplogs from the list below should be deleted
  String[] files = {
      "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
      "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
      "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
      "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
      "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
  
  Path bucketPath = new Path(testDataDir, getName() + "/0");
  FileSystem fs = hdfsStore.getFileSystem();
  for (String file : files) {
    Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
        blockCache, stats, storeStats);
    createHoplog(10, oplog);
  }

  String marker1 = "0-4-1234"
      + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
      + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
  fs.createNewFile(new Path(bucketPath, marker1));
  String marker2 = "0-5-1235"
      + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
      + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
  fs.createNewFile(new Path(bucketPath, marker2));    
  
  FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
  assertEquals(7, hoplogs.length);
  
  HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
      regionManager, 0);
  
  FileStatus[] markers = organizer.getExpiryMarkers();
  // the two expiry markers created above should be identified
  assertEquals(2, markers.length);
  for (FileStatus marker : markers) {
    String name = marker.getPath().getName();
    assertTrue(name.equals(marker1) || name.equals(marker2));
  }
  organizer.close();
}
 
Example 19
Source File: HalvadeConf.java    From halvade with GNU General Public License v3.0
public static boolean addTaskRunning(Configuration conf, String val) throws IOException, URISyntaxException {
    val = val.substring(0, val.lastIndexOf("_")); // rewrite file if second attempt
    String filepath = conf.get(outdir) + tasksDone + val;
    FileSystem fs = FileSystem.get(new URI(filepath), conf);
    return fs.createNewFile(new Path(filepath));
}