Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#getFileSystemAs()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#getFileSystemAs(). You can vote up the examples you find useful or vote down those you don't, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: TestAuditLogs.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the unprivileged test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  // Make the file owner-only and owned by root so the unprivileged user
  // is guaranteed to be denied access.
  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  try {
    // If open unexpectedly succeeds, close the stream immediately so it
    // does not leak before fail() aborts the test.
    userfs.open(file).close();
    fail("open must not succeed");
  } catch(AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  // Expect a "denied" entry in the audit log.
  verifyAuditLogs(false);
}
 
Example 2
Source File: TestAuditLogs.java    From big-c with Apache License 2.0 6 votes vote down vote up
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the unprivileged test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  // Make the file owner-only and owned by root so the unprivileged user
  // is guaranteed to be denied access.
  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  try {
    // If open unexpectedly succeeds, close the stream immediately so it
    // does not leak before fail() aborts the test.
    userfs.open(file).close();
    fail("open must not succeed");
  } catch(AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  // Expect a "denied" entry in the audit log.
  verifyAuditLogs(false);
}
 
Example 3
Source File: TestStickyBit.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Starts a 4-DataNode MiniDFSCluster and caches per-user file system handles.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @throws Exception if cluster startup or file system creation fails
 */
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
    .build();
  // Block until the cluster is fully up before handing out file systems,
  // consistent with the other initCluster helpers shown on this page.
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  // Per-user handles used by the sticky-bit permission tests.
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
 
Example 4
Source File: TestBackupSmallTests.java    From hbase with Apache License 2.0 5 votes vote down vote up
/**
 * Expects an IOException when the DIANA user tries to create the backup path
 * under a parent directory whose permissions have been stripped to 000.
 */
@Test(expected = IOException.class) public void testBackupPathIsNotAccessible() throws Exception {
  Path backupPath = new Path(PERMISSION_TEST_PATH);
  Path parent = backupPath.getParent();
  // Superuser handle: create the parent, then make it inaccessible.
  FileSystem rootFs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration());
  rootFs.mkdirs(parent);
  rootFs.setPermission(parent, FsPermission.createImmutable((short) 000));
  // Non-privileged user handle: mkdirs under the locked-down parent must fail.
  FileSystem dianaFs =
      DFSTestUtil.getFileSystemAs(DIANA, TEST_UTIL.getConnection().getConfiguration());
  dianaFs.mkdirs(backupPath);
}
 
Example 5
Source File: TestAclWithSnapshot.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Brings up a single-DataNode MiniDFSCluster, blocks until it is active, and
 * caches FileSystem handles for the superuser and for each test user.
 *
 * @param format whether to format the NameNode and DataNodes before starting
 * @throws Exception if cluster startup or file system creation fails
 */
private static void initCluster(boolean format) throws Exception {
  MiniDFSCluster.Builder builder =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format);
  cluster = builder.build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
  fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
 
Example 6
Source File: TestCacheDirectives.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that listCachePools hides owner/group/mode/limit from a non-owner,
 * and reveals them once the user is made the pool's owner.
 */
@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
  // A plain remote user that initially has no rights on the pool.
  final UserGroupInformation myUser = UserGroupInformation
      .createRemoteUser("myuser");
  final DistributedFileSystem myDfs =
      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short)0700)));

  // A non-owner listing the pool should only see its name.
  RemoteIterator<CachePoolEntry> poolIter = myDfs.listCachePools();
  CachePoolInfo poolInfo = poolIter.next().getInfo();
  assertFalse(poolIter.hasNext());
  assertEquals("Expected pool name", poolName, poolInfo.getPoolName());
  assertNull("Unexpected owner name", poolInfo.getOwnerName());
  assertNull("Unexpected group name", poolInfo.getGroupName());
  assertNull("Unexpected mode", poolInfo.getMode());
  assertNull("Unexpected limit", poolInfo.getLimit());

  // Hand ownership of the pool to myuser and set a limit.
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName())
      .setLimit(limit));

  // As the owner, the user should now see every field of the pool.
  poolIter = myDfs.listCachePools();
  poolInfo = poolIter.next().getInfo();
  assertFalse(poolIter.hasNext());
  assertEquals("Expected pool name", poolName, poolInfo.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(),
      poolInfo.getOwnerName());
  assertNotNull("Expected group name", poolInfo.getGroupName());
  assertEquals("Mismatched mode", (short) 0700,
      poolInfo.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long)poolInfo.getLimit());
}
 
Example 7
Source File: TestAuditLogs.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the (permitted) test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  // Stat as the user, then confirm an "allowed" audit entry was written.
  FileStatus status = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", status != null && status.isFile());
}
 
Example 8
Source File: TestAuditLogs.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** test that allowed operation puts proper entry in audit log */
@Test
public void testAuditAllowed() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the (permitted) test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  int val;
  // try-with-resources guarantees the stream is closed even if read()
  // throws; the original explicit close() would have been skipped.
  try (InputStream istream = userfs.open(file)) {
    val = istream.read();
  }
  // Expect an "allowed" entry in the audit log for the open.
  verifyAuditLogs(true);
  assertTrue("failed to read from file", val >= 0);
}
 
Example 9
Source File: TestStickyBit.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Starts a 4-DataNode MiniDFSCluster and caches per-user file system handles.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @throws Exception if cluster startup or file system creation fails
 */
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
    .build();
  // Block until the cluster is fully up before handing out file systems,
  // consistent with the other initCluster helpers shown on this page.
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  // Per-user handles used by the sticky-bit permission tests.
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
 
Example 10
Source File: TestAclWithSnapshot.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Brings up a single-DataNode MiniDFSCluster, blocks until it is active, and
 * caches FileSystem handles for the superuser and for each test user.
 *
 * @param format whether to format the NameNode and DataNodes before starting
 * @throws Exception if cluster startup or file system creation fails
 */
private static void initCluster(boolean format) throws Exception {
  MiniDFSCluster.Builder builder =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format);
  cluster = builder.build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
  fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
 
Example 11
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that listCachePools hides owner/group/mode/limit from a non-owner,
 * and reveals them once the user is made the pool's owner.
 */
@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
  // A plain remote user that initially has no rights on the pool.
  final UserGroupInformation myUser = UserGroupInformation
      .createRemoteUser("myuser");
  final DistributedFileSystem myDfs =
      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short)0700)));

  // A non-owner listing the pool should only see its name.
  RemoteIterator<CachePoolEntry> poolIter = myDfs.listCachePools();
  CachePoolInfo poolInfo = poolIter.next().getInfo();
  assertFalse(poolIter.hasNext());
  assertEquals("Expected pool name", poolName, poolInfo.getPoolName());
  assertNull("Unexpected owner name", poolInfo.getOwnerName());
  assertNull("Unexpected group name", poolInfo.getGroupName());
  assertNull("Unexpected mode", poolInfo.getMode());
  assertNull("Unexpected limit", poolInfo.getLimit());

  // Hand ownership of the pool to myuser and set a limit.
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName())
      .setLimit(limit));

  // As the owner, the user should now see every field of the pool.
  poolIter = myDfs.listCachePools();
  poolInfo = poolIter.next().getInfo();
  assertFalse(poolIter.hasNext());
  assertEquals("Expected pool name", poolName, poolInfo.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(),
      poolInfo.getOwnerName());
  assertNotNull("Expected group name", poolInfo.getGroupName());
  assertEquals("Mismatched mode", (short) 0700,
      poolInfo.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long)poolInfo.getLimit());
}
 
Example 12
Source File: TestAuditLogs.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the (permitted) test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  // Stat as the user, then confirm an "allowed" audit entry was written.
  FileStatus status = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", status != null && status.isFile());
}
 
Example 13
Source File: TestAuditLogs.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** test that allowed operation puts proper entry in audit log */
@Test
public void testAuditAllowed() throws Exception {
  final Path file = new Path(fnames[0]);
  // File system handle bound to the (permitted) test user.
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  int val;
  // try-with-resources guarantees the stream is closed even if read()
  // throws; the original explicit close() would have been skipped.
  try (InputStream istream = userfs.open(file)) {
    val = istream.read();
  }
  // Expect an "allowed" entry in the audit log for the open.
  verifyAuditLogs(true);
  assertTrue("failed to read from file", val >= 0);
}
 
Example 14
Source File: TestSnapshottableDirListing.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the user are listed.
 */
@Test (timeout=60000)
public void testListWithDifferentUser() throws Exception {
  // Allow snapshottable dirs to be nested so subdir_user2 (below) is legal.
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  // first make dir1 and dir2 snapshottable
  hdfs.allowSnapshot(dir1);
  hdfs.allowSnapshot(dir2);
  // Open up the root so the non-super test users can create dirs under it.
  hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
  
  // create two dirs and make them snapshottable under the name of user1
  UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
      "user1", new String[] { "group1" });
  DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi1, conf);
  Path dir1_user1 = new Path("/dir1_user1");
  Path dir2_user1 = new Path("/dir2_user1");
  fs1.mkdirs(dir1_user1);
  fs1.mkdirs(dir2_user1);
  // NOTE(review): snapshots are enabled through the superuser handle (hdfs),
  // not fs1 — presumably allowSnapshot needs superuser privileges; confirm.
  hdfs.allowSnapshot(dir1_user1);
  hdfs.allowSnapshot(dir2_user1);
  
  // user2 creates a dir and a nested subdir; both become snapshottable
  UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
      "user2", new String[] { "group2" });
  DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi2, conf);
  Path dir_user2 = new Path("/dir_user2");
  Path subdir_user2 = new Path(dir_user2, "subdir");
  fs2.mkdirs(dir_user2);
  fs2.mkdirs(subdir_user2);
  hdfs.allowSnapshot(dir_user2);
  hdfs.allowSnapshot(subdir_user2);
  
  // super user: any user in the configured supergroup is a superuser
  String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
      DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
      "superuser", new String[] { supergroup });
  DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(superUgi, conf);
  
  // list the snapshottable dirs for superuser — sees every dir
  SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
  // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
  // subdir_user2
  assertEquals(6, dirs.length);
  
  // list the snapshottable dirs for user1
  dirs = fs1.getSnapshottableDirListing();
  // 2 dirs owned by user1: dir1_user1 and dir2_user1
  assertEquals(2, dirs.length);
  assertEquals(dir1_user1, dirs[0].getFullPath());
  assertEquals(dir2_user1, dirs[1].getFullPath());
  
  // list the snapshottable dirs for user2
  dirs = fs2.getSnapshottableDirListing();
  // 2 dirs owned by user2: dir_user2 and subdir_user2
  assertEquals(2, dirs.length);
  assertEquals(dir_user2, dirs[0].getFullPath());
  assertEquals(subdir_user2, dirs[1].getFullPath());
}
 
Example 15
Source File: TestSnapshottableDirListing.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the user are listed.
 */
@Test (timeout=60000)
public void testListWithDifferentUser() throws Exception {
  // Allow snapshottable dirs to be nested so subdir_user2 (below) is legal.
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  // first make dir1 and dir2 snapshottable
  hdfs.allowSnapshot(dir1);
  hdfs.allowSnapshot(dir2);
  // Open up the root so the non-super test users can create dirs under it.
  hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
  
  // create two dirs and make them snapshottable under the name of user1
  UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
      "user1", new String[] { "group1" });
  DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi1, conf);
  Path dir1_user1 = new Path("/dir1_user1");
  Path dir2_user1 = new Path("/dir2_user1");
  fs1.mkdirs(dir1_user1);
  fs1.mkdirs(dir2_user1);
  // NOTE(review): snapshots are enabled through the superuser handle (hdfs),
  // not fs1 — presumably allowSnapshot needs superuser privileges; confirm.
  hdfs.allowSnapshot(dir1_user1);
  hdfs.allowSnapshot(dir2_user1);
  
  // user2 creates a dir and a nested subdir; both become snapshottable
  UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
      "user2", new String[] { "group2" });
  DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi2, conf);
  Path dir_user2 = new Path("/dir_user2");
  Path subdir_user2 = new Path(dir_user2, "subdir");
  fs2.mkdirs(dir_user2);
  fs2.mkdirs(subdir_user2);
  hdfs.allowSnapshot(dir_user2);
  hdfs.allowSnapshot(subdir_user2);
  
  // super user: any user in the configured supergroup is a superuser
  String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
      DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
      "superuser", new String[] { supergroup });
  DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(superUgi, conf);
  
  // list the snapshottable dirs for superuser — sees every dir
  SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
  // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
  // subdir_user2
  assertEquals(6, dirs.length);
  
  // list the snapshottable dirs for user1
  dirs = fs1.getSnapshottableDirListing();
  // 2 dirs owned by user1: dir1_user1 and dir2_user1
  assertEquals(2, dirs.length);
  assertEquals(dir1_user1, dirs[0].getFullPath());
  assertEquals(dir2_user1, dirs[1].getFullPath());
  
  // list the snapshottable dirs for user2
  dirs = fs2.getSnapshottableDirListing();
  // 2 dirs owned by user2: dir_user2 and subdir_user2
  assertEquals(2, dirs.length);
  assertEquals(dir_user2, dirs[0].getFullPath());
  assertEquals(subdir_user2, dirs[1].getFullPath());
}
 
Example 16
Source File: FSXAttrBaseTest.java    From big-c with Apache License 2.0 2 votes vote down vote up
/**
 * Builds a FileSystem instance that performs operations as the given user.
 *
 * @param user UserGroupInformation identifying the user
 * @return FileSystem bound to that user's identity
 * @throws Exception if the file system cannot be created
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  FileSystem userFs = DFSTestUtil.getFileSystemAs(user, conf);
  return userFs;
}
 
Example 17
Source File: FSAclBaseTest.java    From hadoop with Apache License 2.0 2 votes vote down vote up
/**
 * Builds a FileSystem instance that performs operations as the given user,
 * using the configuration of the cluster's first NameNode.
 *
 * @param user UserGroupInformation identifying the user
 * @return FileSystem bound to that user's identity
 * @throws Exception if the file system cannot be created
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  final FileSystem userFs =
      DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
  return userFs;
}
 
Example 18
Source File: FSAclBaseTest.java    From big-c with Apache License 2.0 2 votes vote down vote up
/**
 * Builds a FileSystem instance that performs operations as the given user,
 * using the configuration of the cluster's first NameNode.
 *
 * @param user UserGroupInformation identifying the user
 * @return FileSystem bound to that user's identity
 * @throws Exception if the file system cannot be created
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  final FileSystem userFs =
      DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
  return userFs;
}
 
Example 19
Source File: FSXAttrBaseTest.java    From hadoop with Apache License 2.0 2 votes vote down vote up
/**
 * Builds a FileSystem instance that performs operations as the given user.
 *
 * @param user UserGroupInformation identifying the user
 * @return FileSystem bound to that user's identity
 * @throws Exception if the file system cannot be created
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  FileSystem userFs = DFSTestUtil.getFileSystemAs(user, conf);
  return userFs;
}