Java Code Examples for org.apache.hadoop.fs.FileSystem.setTimes()

The following are Java code examples showing how to use the setTimes() method of the org.apache.hadoop.fs.FileSystem class.
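Before the project examples, here is a minimal, self-contained sketch of the call itself. It assumes a file already exists at the hypothetical path /tmp/example.txt. setTimes(path, mtime, atime) takes times in milliseconds since the epoch; passing -1 for either argument leaves that timestamp unchanged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path used only for illustration; the file must already exist.
    Path p = new Path("/tmp/example.txt");

    long now = System.currentTimeMillis();
    // Set both modification time and access time.
    fs.setTimes(p, now, now);
    // Update only the modification time; -1 leaves the access time unchanged.
    fs.setTimes(p, now - 60_000L, -1);

    FileStatus status = fs.getFileStatus(p);
    System.out.println("mtime=" + status.getModificationTime()
        + " atime=" + status.getAccessTime());
  }
}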
Example 1
Project: hadoop   File: BaseTestHttpFSWith.java
private void testSetTimes() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    fs = getHttpFSFileSystem();
    fs.setTimes(path, mt - 10, at - 20);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mtNew, mt - 10);
    Assert.assertEquals(atNew, at - 20);
  }
}
 
Example 2
Project: hadoop   File: TestAuditLogger.java
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Project: hadoop   File: TestAuditLogger.java
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 4
Project: hadoop   File: TestAuditLogger.java
/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Project: hadoop   File: DistCpV1.java
private static void updateDestStatus(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preserved, FileSystem destFileSys
    ) throws IOException {
  String owner = null;
  String group = null;
  if (preserved.contains(FileAttribute.USER)
      && !src.getOwner().equals(dst.getOwner())) {
    owner = src.getOwner();
  }
  if (preserved.contains(FileAttribute.GROUP)
      && !src.getGroup().equals(dst.getGroup())) {
    group = src.getGroup();
  }
  if (owner != null || group != null) {
    destFileSys.setOwner(dst.getPath(), owner, group);
  }
  if (preserved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
  if (preserved.contains(FileAttribute.TIMES)) {
    destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
  }
}
 
Example 6
Project: ditb   File: TestHFileCleaner.java
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  Path file = new Path(root, "file");
  fs.createNewFile(file);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fs.exists(file));
  TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
  // update the time info for the file, so the cleaner removes it
  fs.setTimes(file, createTime - 100, -1);
  Configuration conf = UTIL.getConfiguration();
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  cleaner.setConf(conf);
  assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
      + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Example 7
Project: hadoop   File: TestNameNodeMXBean.java
@Test(timeout=120000)
public void testTopUsersDisabled() throws Exception {
  final Configuration conf = new Configuration();
  // Disable nntop
  conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i=0; i< NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 8
Project: hadoop   File: TestNameNodeMXBean.java
@Test(timeout=120000)
public void testTopUsersNoPeriods() throws Exception {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i=0; i< NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    assertNotNull("Expected TopUserOpCounts bean!", topUsers);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 9
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);
  
  // preserve replication, block size, user, group, permission, 
  // checksum type and timestamps    
  EnumSet<FileAttribute> attributes = 
      DistCpUtils.unpackAttributes(
          DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 10
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveNothingOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(dstStatus.getAccessTime() == 100);
  Assert.assertTrue(dstStatus.getModificationTime() == 100);
  Assert.assertTrue(dstStatus.getReplication() == 0);
}
 
Example 11
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveNothingOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 12
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreservePermissionOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 13
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveGroupOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 14
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveUserOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 15
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveReplicationOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 16
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveTimestampOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 17
Project: hadoop   File: TestNameNodeMXBean.java
@Test(timeout=120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i=0; i< NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> map = mapper.readValue(topUsers, Map.class);
    assertTrue("Could not find map key timestamp", 
        map.containsKey("timestamp"));
    assertTrue("Could not find map key windows", map.containsKey("windows"));
    List<Map<String, List<Map<String, Object>>>> windows =
        (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
    assertEquals("Unexpected num windows", 3, windows.size());
    for (Map<String, List<Map<String, Object>>> window : windows) {
      final List<Map<String, Object>> ops = window.get("ops");
      assertEquals("Unexpected num ops", 3, ops.size());
      for (Map<String, Object> op: ops) {
        final long count = Long.parseLong(op.get("totalCount").toString());
        final String opType = op.get("opType").toString();
        final int expected;
        if (opType.equals(TopConf.ALL_CMDS)) {
          expected = 2*NUM_OPS;
        } else {
          expected = NUM_OPS;
        }
        assertEquals("Unexpected total count", expected, count);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 18
Project: ditb   File: TestHFileCleaner.java
@Test(timeout = 60 * 1000)
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveHFileCleaner),
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }

  assertEquals(33, fs.listStatus(archivedHfileDir).length);

  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);

  // run the chore
  cleaner.chore();

  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);

  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }

  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
 
Example 19
Project: ditb   File: FSUtils.java
public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
    throws IOException {
  // set the modify time for TimeToLive Cleaner
  fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
  return fs.rename(src, dest);
}
 
Example 20
Project: hadoop   File: FSOperations.java
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return void.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  fs.setTimes(path, mTime, aTime);
  return null;
}
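The execute() method above is only a fragment of an operation object from the HttpFS server code; the enclosing class (not shown here) holds path, mTime, and aTime as fields populated in its constructor. The following is a hypothetical sketch of such a wrapper, with SetTimesOperation used as an assumed class name purely for illustration:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of an enclosing operation class (names are assumptions,
// not the actual HttpFS source): the path and timestamps are captured when
// the operation is created, and applied when execute() is later handed a
// FileSystem instance.
public class SetTimesOperation {
  private final Path path;
  private final long mTime;
  private final long aTime;

  public SetTimesOperation(Path path, long mTime, long aTime) {
    this.path = path;
    this.mTime = mTime;
    this.aTime = aTime;
  }

  public Void execute(FileSystem fs) throws IOException {
    fs.setTimes(path, mTime, aTime);
    return null;
  }
}

Usage would be along the lines of new SetTimesOperation(path, mtime, atime).execute(fs), with the caller supplying the FileSystem instance.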