Java Code Examples for org.apache.hadoop.fs.permission.FsPermission

The following are top-voted examples showing how to use org.apache.hadoop.fs.permission.FsPermission. These examples are extracted from open-source projects. You can vote up the examples you find useful; your votes help the system surface better examples.
Example 1
Project: hadoop-oss   File: TestCredentialProviderFactory.java   View source code 7 votes vote down vote up
@Test
public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  // Start from a clean slate: remove any keystore left by a previous run.
  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  // The keystore must be created owner-only. Include the actual permission
  // in the failure message (the bare assertTrue gave no diagnostic),
  // matching the message style of the assert below.
  assertTrue("unexpected permissions: " + s.getPermission(),
      s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
 
Example 2
Project: hadoop-oss   File: SFTPFileSystem.java   View source code 7 votes vote down vote up
/**
 * Convert the file information in LsEntry to a {@link FileStatus} object.
 *
 * @param channel sftp channel used to resolve symlink targets
 * @param sftpFile entry returned by the channel's directory listing
 * @param parentPath directory containing the entry
 * @return file status
 * @throws IOException if a symlink target cannot be resolved
 */
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
    Path parentPath) throws IOException {

  SftpATTRS attr = sftpFile.getAttrs();
  long length = attr.getSize();
  boolean isDir = attr.isDir();
  boolean isLink = attr.isLink();
  if (isLink) {
    String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
    try {
      link = channel.realpath(link);

      Path linkParent = new Path("/", link);

      // Report the link target's type and size rather than the link's own.
      FileStatus fstat = getFileStatus(channel, linkParent);
      isDir = fstat.isDirectory();
      length = fstat.getLen();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  int blockReplication = 1;
  // Using default block size since there is no way in SFTP channel to know of
  // block sizes on server. The assumption could be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  // getMTime() returns epoch seconds as an int. Multiply by 1000L so the
  // conversion to milliseconds happens in 64-bit arithmetic: the previous
  // int multiplication overflowed for any mtime later than ~25 days after
  // the epoch, yielding a garbage modification time.
  long modTime = attr.getMTime() * 1000L; // convert to milliseconds
  long accessTime = 0;
  FsPermission permission = getPermissions(sftpFile);
  // Unable to resolve the real user/group names over SFTP; fall back to
  // the numeric uid/gid rendered as strings.
  String user = Integer.toString(attr.getUId());
  String group = Integer.toString(attr.getGId());
  Path filePath = new Path(parentPath, sftpFile.getFilename());

  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(
          this.getUri(), this.getWorkingDirectory()));
}
 
Example 3
Project: ditb   File: ExportSnapshot.java   View source code 7 votes vote down vote up
/**
 * Create the output folder and optionally set ownership.
 *
 * @param path directory to create on the output file system
 * @throws IOException if a directory cannot be created or chowned
 */
private void createOutputPath(final Path path) throws IOException {
  if (filesUser == null && filesGroup == null) {
    outputFs.mkdirs(path);
  } else {
    Path parent = path.getParent();
    if (!outputFs.exists(parent) && !parent.isRoot()) {
      // Recurse so ownership is applied to every ancestor we create,
      // not just the leaf directory.
      createOutputPath(parent);
    }
    outputFs.mkdirs(path);
    // At least one of filesUser/filesGroup is non-null in this branch
    // (outer condition), so the previous redundant re-check was removed.
    // override the owner when non-null user/group is specified
    outputFs.setOwner(path, filesUser, filesGroup);
    if (filesMode > 0) {
      outputFs.setPermission(path, new FsPermission(filesMode));
    }
  }
}
 
Example 4
Project: hadoop   File: S3FileSystem.java   View source code 7 votes vote down vote up
/**
 * Creates the directory and all missing ancestors.
 *
 * @param permission Currently ignored.
 */
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
  // Collect the path and every ancestor, prepending as we walk upward so
  // the list ends up ordered root-first (parents before children).
  List<Path> ancestors = new ArrayList<Path>();
  Path current = makeAbsolute(path);
  while (current != null) {
    ancestors.add(0, current);
    current = current.getParent();
  }

  boolean allCreated = true;
  for (Path dir : ancestors) {
    allCreated &= mkdir(dir);
  }
  return allCreated;
}
 
Example 5
Project: hadoop   File: ChecksumFileSystem.java   View source code 6 votes vote down vote up
/**
 * Opens the raw data stream and its companion checksum (".crc") stream,
 * writing the checksum header (version + bytesPerSum) up front.
 * If the checksum stream cannot be created or initialized, the already
 * opened data stream is closed instead of being leaked.
 */
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  try {
    int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
    this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                             permission, true, sumBufferSize,
                                             replication, blockSize, null);
    sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
    sums.writeInt(bytesPerSum);
  } catch (IOException e) {
    // Don't leak the data stream if the checksum stream failed.
    try {
      datas.close();
    } catch (IOException ignored) {
      // best effort: prefer surfacing the original failure
    }
    throw e;
  }
}
 
Example 6
Project: hadoop-oss   File: FileContextMainOperationsBaseTest.java   View source code 6 votes vote down vote up
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2), };

  // Create the fixture directories only if they are not already there.
  // Idiomatic negation instead of comparing against the literal `false`.
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }

  // should return nothing
  FileStatus[] paths = fc.util().globStatus(
      getTestRootPath(fc, "test/hadoop/?"));
  Assert.assertEquals(0, paths.length);
}
 
Example 7
Project: hadoop   File: FileContextPermissionBase.java   View source code 6 votes vote down vote up
/** Verifies that permissions set through FileContext round-trip via getFileStatus. */
@Test
public void testSetPermission() throws IOException {
  if (Path.WINDOWS) {
    System.out.println("Cannot run test for Windows");
    return;
  }

  String filename = "foo";
  Path testFile = fileContextTestHelper.getTestRootPath(fc, filename);
  createFile(fc, testFile);

  try {
    // Flip the file between no access and full access and verify that
    // each change is observable through getFileStatus().
    FsPermission fullAccess = new FsPermission((short) 0777);
    FsPermission noAccess = new FsPermission((short) 0);

    fc.setPermission(testFile, noAccess);
    doFilePermissionCheck(noAccess, fc.getFileStatus(testFile).getPermission());

    fc.setPermission(testFile, fullAccess);
    doFilePermissionCheck(fullAccess, fc.getFileStatus(testFile).getPermission());
  } finally {
    cleanupFile(fc, testFile);
  }
}
 
Example 8
Project: hadoop-oss   File: FileContextPermissionBase.java   View source code 6 votes vote down vote up
/** Checks that permission changes made via FileContext are read back intact. */
@Test
public void testSetPermission() throws IOException {
  if (Path.WINDOWS) {
    System.out.println("Cannot run test for Windows");
    return;
  }

  String filename = "foo";
  Path target = fileContextTestHelper.getTestRootPath(fc, filename);
  createFile(fc, target);

  try {
    // Apply the two extreme permission masks in turn and confirm the
    // value reported by the file status matches what was set.
    FsPermission everything = new FsPermission((short) 0777);
    FsPermission nothing = new FsPermission((short) 0);

    fc.setPermission(target, nothing);
    doFilePermissionCheck(nothing, fc.getFileStatus(target).getPermission());

    fc.setPermission(target, everything);
    doFilePermissionCheck(everything, fc.getFileStatus(target).getPermission());
  } finally {
    cleanupFile(fc, target);
  }
}
 
Example 9
Project: hadoop   File: FSAclBaseTest.java   View source code 6 votes vote down vote up
// Removes the named-user "foo" access entry from an ACL that has no
// default entries, then verifies that the remaining access entries
// (named user "bar" and the group entry) survive intact.
@Test
public void testRemoveAclEntriesOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short)0640));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, ALL),
    aclEntry(ACCESS, USER, "foo", ALL),
    aclEntry(ACCESS, USER, "bar", READ_WRITE),
    aclEntry(ACCESS, GROUP, READ_WRITE),
    aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  // Removal specs identify entries by scope/type/name only; the
  // permission component is omitted.
  aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "foo"));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(ACCESS, USER, "bar", READ_WRITE),
    aclEntry(ACCESS, GROUP, READ_WRITE) }, returned);
  // NOTE(review): the 01 prefix of the expected mode appears to be the
  // ACL-bit flag (consistent with assertAclFeature(true) below) — confirm
  // against assertPermission's implementation.
  assertPermission((short)010760);
  assertAclFeature(true);
}
 
Example 10
Project: hadoop-oss   File: FileSystem.java   View source code 6 votes vote down vote up
/**
 * This version of the mkdirs method assumes that the permission is absolute.
 * It has been added to support the FileContext that processes the permission
 * with umask before calling this method.
 * This a temporary method added to support the transition from FileSystem
 * to FileContext for user applications.
 *
 * @param f directory to create
 * @param absolutePermission permission applied as-is (umask already handled)
 * @param createParent if false, the parent directory must already exist
 * @throws FileNotFoundException if createParent is false and the parent is missing
 * @throws ParentNotDirectoryException if the parent exists but is not a directory
 * @throws IOException if the underlying mkdirs reports failure
 */
@Deprecated
protected void primitiveMkdir(Path f, FsPermission absolutePermission, 
                  boolean createParent)
  throws IOException {
  
  if (!createParent) { // parent must exist.
    // since the this.mkdirs makes parent dirs automatically
    // we must throw exception if parent does not exist.
    // NOTE(review): getFileStatus typically throws FileNotFoundException
    // itself rather than returning null, so the null check below is
    // likely defensive — confirm against this FS's contract.
    final FileStatus stat = getFileStatus(f.getParent());
    if (stat == null) {
      throw new FileNotFoundException("Missing parent:" + f);
    }
    if (!stat.isDirectory()) {
      throw new ParentNotDirectoryException("parent is not a dir");
    }
    // parent does exist - go ahead with mkdir of leaf
  }
  // Default impl is to assume that permissions do not matter and hence
  // calling the regular mkdirs is good enough.
  // FSs that implement permissions should override this.
  if (!this.mkdirs(f, absolutePermission)) {
    throw new IOException("mkdir of "+ f + " failed");
  }
}
 
Example 11
Project: hadoop-oss   File: FileSystem.java   View source code 6 votes vote down vote up
/**
 * This method provides the default implementation of
 * {@link #access(Path, FsAction)}.
 *
 * <p>Exactly one permission class is consulted, POSIX-style: the owner
 * bits if the caller is the owner, otherwise the group bits if the caller
 * is in the file's group, otherwise the "other" bits. A denial in the
 * matching class is final — it does not fall through to a broader class.
 *
 * @param stat FileStatus to check
 * @param mode type of access to check
 * @throws AccessControlException if the selected class does not imply {@code mode}
 * @throws IOException for any error
 */
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
    throws IOException {
  FsPermission perm = stat.getPermission();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String user = ugi.getShortUserName();
  List<String> groups = Arrays.asList(ugi.getGroupNames());
  if (user.equals(stat.getOwner())) {
    if (perm.getUserAction().implies(mode)) {
      return;
    }
  } else if (groups.contains(stat.getGroup())) {
    if (perm.getGroupAction().implies(mode)) {
      return;
    }
  } else {
    if (perm.getOtherAction().implies(mode)) {
      return;
    }
  }
  // Error message mirrors the "ls -l" rendering: owner:group:d?perm.
  throw new AccessControlException(String.format(
    "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
    stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
 
Example 12
Project: dremio-oss   File: TestPseudoDistributedFileSystem.java   View source code 6 votes vote down vote up
/**
 * Stubs the mock local file system with a fixed tree rooted at "/":
 * /foo, /foo/bar, and three children of /foo/bar. Any path not stubbed
 * explicitly reports FileNotFoundException.
 */
@Before
public void setUpLocalFS() throws IOException {
  final FileStatus rootStatus = new FileStatus(4096, true, 0, 0, 37, 42, FsPermission.createImmutable((short) 0555), "root", "wheel", new Path("sabot://10.0.0.1:1234/"));
  final FileStatus fooStatus = new FileStatus(38214, true, 0, 0, 45, 67, FsPermission.createImmutable((short) 0755), "root", "wheel", new Path("sabot://10.0.0.1:1234/foo"));
  final FileStatus fooBarStatus = new FileStatus(67128, true, 1, 4096, 69, 68, FsPermission.createImmutable((short) 0644), "root", "wheel", new Path("sabot://10.0.0.1:1234/foo/bar"));
  final FileStatus fooBarDirStatus = new FileStatus(47, true, 0, 0, 1234, 3645, FsPermission.createImmutable((short) 0755), "admin", "admin", new Path("sabot://10.0.0.1:1234/foo/bar/dir"));
  final FileStatus fooBarFile1Status = new FileStatus(1024, false, 1, 4096, 37, 42, FsPermission.createImmutable((short) 0644), "root", "wheel", new Path("sabot://10.0.0.1:1234/foo/bar/file1"));
  final FileStatus fooBarFile2Status = new FileStatus(2048, false, 1, 4096, 37, 42, FsPermission.createImmutable((short) 0644), "root", "wheel", new Path("sabot://10.0.0.1:1234/foo/bar/file2"));

  // Stubbing order matters in Mockito: register the catch-all
  // any(Path.class) stub first so the more specific per-path stubs below
  // override it. (The original also stubbed "/" *before* the catch-all;
  // that stub was immediately shadowed and has been removed as dead code —
  // the "/" stub after the catch-all is the effective one.)
  doThrow(new FileNotFoundException()).when(mockLocalFS).getFileStatus(any(Path.class));
  doReturn(fooBarFile2Status).when(mockLocalFS).getFileStatus(new Path("/foo/bar/file2"));
  doReturn(fooBarFile1Status).when(mockLocalFS).getFileStatus(new Path("/foo/bar/file1"));
  doReturn(fooBarDirStatus).when(mockLocalFS).getFileStatus(new Path("/foo/bar/dir"));
  doReturn(fooBarStatus).when(mockLocalFS).getFileStatus(new Path("/foo/bar"));
  doReturn(fooStatus).when(mockLocalFS).getFileStatus(new Path("/foo"));
  doReturn(rootStatus).when(mockLocalFS).getFileStatus(new Path("/"));

  doThrow(new FileNotFoundException()).when(mockLocalFS).listStatus(any(Path.class));
  doReturn(new FileStatus[] { fooBarDirStatus, fooBarFile1Status, fooBarFile2Status }).when(mockLocalFS).listStatus(new Path("/foo/bar"));
  doReturn(new FileStatus[] { fooBarStatus }).when(mockLocalFS).listStatus(new Path("/foo"));
  doReturn(new FileStatus[] { fooStatus }).when(mockLocalFS).listStatus(new Path("/"));
}
 
Example 13
Project: hadoop   File: AclCommands.java   View source code 6 votes vote down vote up
/**
 * Prints the getfacl-style listing for one path: a header with file,
 * owner and group, an optional sticky-bit flags line, then the access and
 * default ACL entries.
 */
@Override
protected void processPath(PathData item) throws IOException {
  out.println("# file: " + item);
  out.println("# owner: " + item.stat.getOwner());
  out.println("# group: " + item.stat.getGroup());
  FsPermission perm = item.stat.getPermission();
  if (perm.getStickyBit()) {
    // Lowercase 't' when "other" also has execute, uppercase 'T'
    // otherwise — the conventional ls-style sticky-bit rendering.
    out.println("# flags: --" +
      (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
  }

  AclStatus aclStatus = item.fs.getAclStatus(item.path);
  // Without the ACL bit, the path has no extended entries; synthesize
  // everything from the permission bits alone.
  List<AclEntry> entries = perm.getAclBit() ? aclStatus.getEntries()
      : Collections.<AclEntry> emptyList();
  ScopedAclEntries scopedEntries = new ScopedAclEntries(
    AclUtil.getAclFromPermAndEntries(perm, entries));
  printAclEntriesForSingleScope(aclStatus, perm,
      scopedEntries.getAccessEntries());
  printAclEntriesForSingleScope(aclStatus, perm,
      scopedEntries.getDefaultEntries());
  out.println();
}
 
Example 14
Project: hadoop   File: FSAclBaseTest.java   View source code 6 votes vote down vote up
// Applies an extended ACL, then tightens the base mode to 0700, and
// verifies that the named-user and default entries are preserved while
// the reported permission reflects the new mode.
@Test
public void testSetPermission() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, ALL),
    aclEntry(ACCESS, USER, "foo", ALL),
    aclEntry(ACCESS, GROUP, READ_EXECUTE),
    aclEntry(ACCESS, OTHER, NONE),
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  fs.setPermission(path, FsPermission.createImmutable((short)0700));
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  // Expected: only extended entries are listed for the ACCESS scope,
  // while the DEFAULT scope lists its full set including the entries
  // synthesized from the default user "foo" entry.
  assertArrayEquals(new AclEntry[] {
    aclEntry(ACCESS, USER, "foo", ALL),
    aclEntry(ACCESS, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, USER, "foo", ALL),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, MASK, ALL),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  // NOTE(review): the 01 prefix appears to be the ACL-bit flag
  // (consistent with assertAclFeature(true)); 0700 are the new mode bits.
  assertPermission((short)010700);
  assertAclFeature(true);
}
 
Example 15
Project: hadoop   File: TestEncryptionZones.java   View source code 6 votes vote down vote up
/**
 * Stubs {@code create} on the given ClientProtocol mock to return a fake
 * HdfsFileStatus carrying a FileEncryptionInfo for the requested cipher
 * suite and crypto protocol version.
 */
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      // Octal 0777 (rwxrwxrwx); the previous decimal literal 777
      // produced a nonsensical permission bitmask.
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 0777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
 
Example 16
Project: hadoop-oss   File: JavaKeyStoreProvider.java   View source code 6 votes vote down vote up
/**
 * Attempts to load the keystore from {@code pathToLoad}, promote it to the
 * canonical location, and delete the stale copy at {@code pathToDelete}.
 *
 * @return the loaded keystore's permission, or {@code null} if this
 *         candidate could not be loaded (non-password failure)
 * @throws IOException if the failure looks like a bad/wrong password —
 *         we must not trash the file in that case
 */
private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
    throws NoSuchAlgorithmException, CertificateException,
    IOException {
  FsPermission perm = null;
  try {
    perm = loadFromPath(pathToLoad, password);
    renameOrFail(pathToLoad, path);
    if (LOG.isDebugEnabled()) {
      LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
          pathToLoad));
    }
    if (fs.exists(pathToDelete)) {
      fs.delete(pathToDelete, true);
    }
  } catch (IOException e) {
    // Check for password issue : don't want to trash file due
    // to wrong password
    if (isBadorWrongPassword(e)) {
      throw e;
    }
    // Other IOExceptions mean "this candidate could not be loaded" and we
    // fall through to return null — but record why, instead of silently
    // swallowing the failure.
    LOG.debug("Ignoring load failure from " + pathToLoad, e);
  }
  return perm;
}
 
Example 17
Project: hadoop   File: TestDFSPermission.java   View source code 6 votes vote down vote up
/**
 * A user matching only the "other" class of a 0774 directory must be
 * granted READ but denied READ_WRITE, with a useful denial message.
 */
@Test
public void testAccessOthers() throws IOException, InterruptedException {
  FileSystem superUserFs = FileSystem.get(conf);
  Path p3 = new Path("/p3");
  superUserFs.mkdirs(p3);
  superUserFs.setPermission(p3, new FsPermission((short) 0774));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  // READ is allowed for "other" under 0774.
  fs.access(p3, FsAction.READ);
  boolean denied = false;
  try {
    fs.access(p3, FsAction.READ_WRITE);
  } catch (AccessControlException e) {
    denied = true;
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(p3.getParent().toUri().getPath()));
  }
  if (!denied) {
    fail("The access call should have failed.");
  }
}
 
Example 18
Project: hadoop   File: TestAuditLogs.java   View source code 6 votes vote down vote up
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  // Restrict the file to its owner (root) so the webhdfs read is denied.
  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream in = webfs.open(file);
    int firstByte = in.read();
    fail("open+read must not succeed, got " + firstByte);
  } catch (AccessControlException expected) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
 
Example 19
Project: hadoop   File: TestDistCacheEmulation.java   View source code 6 votes vote down vote up
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load.
 */
@Test  (timeout=2000)
public void testDistCacheEmulationConfigurability() throws IOException {
  Configuration config = GridmixTestUtils.mrvl.getConfig();
  Path ioDir = new Path("testDistCacheEmulationConfigurability")
      .makeQualified(GridmixTestUtils.dfs.getUri(),
          GridmixTestUtils.dfs.getWorkingDirectory());
  FileSystem fileSys = FileSystem.get(config);
  FileSystem.mkdirs(fileSys, ioDir, new FsPermission((short) 0777));

  // With no explicit setting, emulation must default to enabled.
  dce = createDistributedCacheEmulator(config, ioDir, false);
  assertTrue("Default configuration of "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " is wrong.", dce.shouldEmulateDistCacheLoad());

  // Explicitly setting the property to false must disable emulation.
  config.setBoolean(
      DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
  dce = createDistributedCacheEmulator(config, ioDir, false);
  assertFalse("Disabling of emulation of distributed cache load by setting "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
 
Example 20
Project: hadoop   File: TestEncryptionZones.java   View source code 6 votes vote down vote up
@Test(timeout = 120000)
public void testCreateEZWithNoProvider() throws Exception {
  // Unset the key provider and make sure EZ ops don't work
  final Configuration clusterConf = cluster.getConfiguration(0);
  clusterConf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
  // Restart so the NameNode picks up the provider-less configuration.
  cluster.restartNameNode(true);
  cluster.waitActive();
  final Path zone1 = new Path("/zone1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  try {
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
    fail("expected exception");
  } catch (IOException e) {
    assertExceptionContains("since no key provider is available", e);
  }
  // Restore a provider URI in the configuration for whatever runs next
  // (no restart is performed here).
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  clusterConf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  // Try listing EZs as well
  assertNumZones(0);
}
 
Example 21
Project: hadoop   File: TestFavoredNodesEndToEnd.java   View source code 6 votes vote down vote up
@Test(timeout=180000)
public void testWhenFavoredNodesNotPresent() throws Exception {
  // Asking for favored nodes that do not exist must not fail the write:
  // the client should fall back to other datanodes, and getBlockLocations
  // on the file should still show one block location with three hosts.
  InetSocketAddress[] favoredButAbsent = new InetSocketAddress[3];
  for (int i = 0; i < favoredButAbsent.length; i++) {
    favoredButAbsent[i] = getArbitraryLocalHostAddr();
  }
  Path p = new Path("/filename-foo-bar");
  FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
      4096, (short) 3, 4096L, null, favoredButAbsent);
  out.write(SOME_BYTES);
  out.close();
  getBlockLocations(p);
}
 
Example 22
Project: hadoop   File: FSAclBaseTest.java   View source code 6 votes vote down vote up
// Removes the named entries and masks from both scopes and verifies the
// ACCESS scope falls back to the base permission bits while the DEFAULT
// scope retains its remaining user/group/other entries.
@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, ALL),
    aclEntry(ACCESS, USER, "foo", ALL),
    aclEntry(ACCESS, GROUP, READ_EXECUTE),
    aclEntry(ACCESS, OTHER, NONE),
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  // Removal specs identify entries by scope/type/name only.
  aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "foo"),
    aclEntry(ACCESS, MASK),
    aclEntry(DEFAULT, USER, "foo"),
    aclEntry(DEFAULT, MASK));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  // NOTE(review): the 01 prefix appears to be the ACL-bit flag
  // (consistent with assertAclFeature(true) below).
  assertPermission((short)010750);
  assertAclFeature(true);
}
 
Example 23
Project: hadoop   File: TestDFSPermission.java   View source code 6 votes vote down vote up
/**
 * Creates a file or directory under an explicitly configured umask so the
 * effective permissions can be checked by the caller.
 */
private void create(OpType op, Path name, short umask, 
    FsPermission permission) throws IOException {
  // set umask in configuration, converting to padded octal
  conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));

  // create the file/directory
  if (op == OpType.CREATE) {
    FSDataOutputStream out = fs.create(name, permission, true, 
        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
    out.close();
  } else if (op == OpType.MKDIRS) {
    fs.mkdirs(name, permission);
  } else {
    throw new IOException("Unsupported operation: " + op);
  }
}
 
Example 24
Project: hadoop   File: WindowsSecureContainerExecutor.java   View source code 6 votes vote down vote up
/**
 * Creates {@code f} with elevated privileges and applies the requested
 * mode. The stream is cleaned up only when setting the permission fails;
 * on success, ownership of the open stream passes to the caller.
 */
@Override
protected OutputStream createOutputStreamWithMode(Path f, boolean append,
    FsPermission permission) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("EFS:createOutputStreamWithMode: %s %b %s", f,
        append, permission));
  }
  boolean success = false;
  OutputStream os = Native.Elevated.create(f, append);
  try {
    setPermission(f, permission);
    success = true;
    return os;
  } finally {
    if (!success) {
      // setPermission threw: don't leak the elevated stream.
      IOUtils.cleanup(LOG, os);
    }
  }
}
 
Example 25
Project: hadoop   File: TestAuditLogs.java   View source code 6 votes vote down vote up
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  // World-readable, owned by root — the stat itself must succeed.
  fs.setPermission(file, new FsPermission((short) 0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus status = webfs.getFileStatus(file);

  verifyAuditLogs(true);
  assertTrue("failed to stat file", status != null && status.isFile());
}
 
Example 26
Project: hadoop   File: FSAclBaseTest.java   View source code 6 votes vote down vote up
// Removing the only named entry and the mask should leave no extended
// entries at all, clear the ACL feature, and keep the original 0760 mode
// (note: no 01 ACL-bit prefix in the expected permission here).
@Test
public void testRemoveAclEntriesMinimal() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short)0760));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, ALL),
    aclEntry(ACCESS, USER, "foo", ALL),
    aclEntry(ACCESS, GROUP, READ_WRITE),
    aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  // Removal specs identify entries by scope/type/name only.
  aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "foo"),
    aclEntry(ACCESS, MASK));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] { }, returned);
  assertPermission((short)0760);
  assertAclFeature(false);
}
 
Example 27
Project: hadoop   File: RawLocalFileSystem.java   View source code 6 votes vote down vote up
/**
 * Creates {@code f}, making any missing ancestors first, and tolerates the
 * directory already existing (including concurrent creation by another
 * thread/process).
 *
 * @param f directory to create (must be non-null)
 * @param permission mode to apply via mkOneDirWithMode
 * @return true if the directory exists on return
 * @throws ParentNotDirectoryException if an ancestor exists as a file
 * @throws FileNotFoundException if the destination exists as a file
 * @throws IOException on other I/O failures
 */
private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission)
    throws IOException {
  if(f == null) {
    throw new IllegalArgumentException("mkdirs path arg is null");
  }
  Path parent = f.getParent();
  File p2f = pathToFile(f);
  File parent2f = null;
  if(parent != null) {
    parent2f = pathToFile(parent);
    // Fail fast if the immediate parent exists but is a regular file.
    if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
      throw new ParentNotDirectoryException("Parent path is not a directory: "
          + parent);
    }
  }
  if (p2f.exists() && !p2f.isDirectory()) {
    // NOTE(review): FileNotFoundException is an odd type for "exists but
    // is not a directory"; retained for compatibility with callers.
    throw new FileNotFoundException("Destination exists" +
            " and is not a directory: " + p2f.getCanonicalPath());
  }
  // parent == null means f is the root (nothing to create above it).
  // The trailing "|| p2f.isDirectory()" re-check makes a concurrent
  // creation of the same directory count as success.
  return (parent == null || parent2f.exists() || mkdirs(parent)) &&
    (mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory());
}
 
Example 28
Project: hadoop   File: TestJobHistoryUtils.java   View source code 6 votes vote down vote up
/**
 * Creates (and returns) the directory root/year/month/day/id, making
 * missing parents with the default directory permissions.
 */
private Path createPath(FileContext fc, Path root, int year, int month,
                        int day, String id) throws IOException {
  String relative = year + Path.SEPARATOR + month + Path.SEPARATOR
      + day + Path.SEPARATOR + id;
  Path datePath = new Path(root, relative);
  fc.mkdir(datePath, FsPermission.getDirDefault(), true);
  return datePath;
}
 
Example 29
Project: hadoop-oss   File: FsShellPermissions.java   View source code 5 votes vote down vote up
/**
 * Applies the requested permission change to one path, skipping the RPC
 * entirely when the computed mode already matches the current one.
 */
@Override
protected void processPath(PathData item) throws IOException {
  short desired = pp.applyNewPermission(item.stat);
  if (item.stat.getPermission().toShort() == desired) {
    return; // nothing to change
  }
  try {
    item.fs.setPermission(item.path, new FsPermission(desired));
  } catch (IOException e) {
    LOG.debug("Error changing permissions of " + item, e);
    throw new IOException(
        "changing permissions of '" + item + "': " + e.getMessage());
  }
}
 
Example 30
Project: hadoop   File: SwiftFileStatus.java   View source code 5 votes vote down vote up
/**
 * Constructs a Swift file status by forwarding every field unchanged to
 * the {@link FileStatus} superclass constructor.
 *
 * @param length file length in bytes
 * @param isdir whether the entry is a directory
 * @param block_replication replication factor to report
 * @param blocksize block size to report
 * @param modification_time modification time (as defined by FileStatus)
 * @param access_time access time (as defined by FileStatus)
 * @param permission permission to report
 * @param owner owner name
 * @param group group name
 * @param path path of the entry
 */
public SwiftFileStatus(long length,
                       boolean isdir,
                       int block_replication,
                       long blocksize,
                       long modification_time,
                       long access_time,
                       FsPermission permission,
                       String owner, String group, Path path) {
  super(length, isdir, block_replication, blocksize, modification_time,
          access_time, permission, owner, group, path);
}
 
Example 31
Project: hadoop   File: FileContextTestWrapper.java   View source code 5 votes vote down vote up
/** Delegates directory creation straight to the wrapped {@code fc} FileContext. */
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, IOException {
  fc.mkdir(dir, permission, createParent);
}
 
Example 32
Project: dremio-oss   File: TestHdfs.java   View source code 5 votes vote down vote up
/**
 * Spins up a mini DFS cluster, seeds it with a small world-writable
 * directory tree plus a sample JSON dataset, then boots a local Dremio
 * daemon and a JAX-RS client against it.
 */
@BeforeClass
public static void init() throws Exception {
  assumeNonMaprProfile();
  startMiniDfsCluster(TestHdfs.class.getName());
  String[] hostPort = dfsCluster.getNameNode().getHostAndPort().split(":");
  host = hostPort[0];
  port = Integer.parseInt(hostPort[1]);
  // rwxrwxrwx — reused below instead of re-allocating an identical
  // FsPermission for every mkdirs/setPermission call.
  final FsPermission allAccess =
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
  fs.mkdirs(new Path("/dir1/"), allAccess);
  fs.mkdirs(new Path("/dir1/json"), allAccess);
  fs.mkdirs(new Path("/dir1/text"), allAccess);
  fs.mkdirs(new Path("/dir1/parquet"), allAccess);
  fs.mkdirs(new Path("/dir2"), allAccess);
  fs.copyFromLocalFile(false, true, new Path(FileUtils.getResourceAsFile("/datasets/users.json").getAbsolutePath()),
    new Path("/dir1/json/users.json"));
  fs.setPermission(new Path("/dir1/json/users.json"), allAccess);
  try (Timer.TimedBlock b = Timer.time("[email protected]")) {
    dremioDaemon = DACDaemon.newDremioDaemon(
      DACConfig
        .newDebugConfig(DremioTest.DEFAULT_SABOT_CONFIG)
        .autoPort(true)
        .allowTestApis(true)
        .writePath(folder.getRoot().getAbsolutePath())
        .clusterMode(ClusterMode.LOCAL)
        .serveUI(true),
      DremioTest.CLASSPATH_SCAN_RESULT,
      new DACDaemonModule(),
      new HDFSSourceConfigurator());
    dremioDaemon.init();
    dremioBinder = BaseTestServer.createBinder(dremioDaemon.getBindingProvider());
    JacksonJaxbJsonProvider provider = new JacksonJaxbJsonProvider();
    provider.setMapper(JSONUtil.prettyMapper());
    client = ClientBuilder.newBuilder().register(provider).register(MultiPartFeature.class).build();
  }
}
 
Example 33
Project: hadoop   File: HftpFileSystem.java   View source code 5 votes vote down vote up
/**
 * SAX handler for one entry of an HFTP directory listing: translates a
 * "file" or "directory" element into a FileStatus appended to fslist,
 * rethrows a serialized RemoteException, and rejects anything else.
 */
@Override
public void startElement(String ns, String localname, String qname,
            Attributes attrs) throws SAXException {
  if ("listing".equals(qname)) return;
  if (!"file".equals(qname) && !"directory".equals(qname)) {
    if (RemoteException.class.getSimpleName().equals(qname)) {
      throw new SAXException(RemoteException.valueOf(attrs));
    }
    throw new SAXException("Unrecognized entry: " + qname);
  }
  long modif;
  long atime = 0;
  try {
    // df.get() — presumably a per-thread SimpleDateFormat, which is not
    // thread-safe; TODO confirm df is a ThreadLocal in the enclosing class.
    final SimpleDateFormat ldf = df.get();
    modif = ldf.parse(attrs.getValue("modified")).getTime();
    String astr = attrs.getValue("accesstime");
    if (astr != null) {
      atime = ldf.parse(astr).getTime();
    }
  } catch (ParseException e) { throw new SAXException(e); }
  FileStatus fs = "file".equals(qname)
    ? new FileStatus(
          Long.parseLong(attrs.getValue("size")), false,
          // parseShort avoids the boxing round-trip of
          // Short.valueOf(...).shortValue()
          Short.parseShort(attrs.getValue("replication")),
          Long.parseLong(attrs.getValue("blocksize")),
          modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
          attrs.getValue("owner"), attrs.getValue("group"),
          HftpFileSystem.this.makeQualified(
              new Path(getUri().toString(), attrs.getValue("path"))))
    : new FileStatus(0L, true, 0, 0L,
          modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
          attrs.getValue("owner"), attrs.getValue("group"),
          HftpFileSystem.this.makeQualified(
              new Path(getUri().toString(), attrs.getValue("path"))));
  fslist.add(fs);
}
 
Example 34
Project: hadoop   File: ViewFs.java   View source code 5 votes vote down vote up
/**
 * Resolves {@code f} through the view-fs mount table and delegates the
 * permission change to the file system that actually owns the path.
 */
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final InodeTree.ResolveResult<AbstractFileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  resolved.targetFileSystem.setPermission(resolved.remainingPath, permission);
}
 
Example 35
Project: dremio-oss   File: TestPseudoDistributedFileSystem.java   View source code 5 votes vote down vote up
/**
 * mkdirs() on a pseudo-distributed path should succeed when both the
 * local and the remote delegate file systems report success.
 */
@Test
public void testMkdirsRemoteFile() throws IOException {
  final Path dir = new Path("/foo/bar/dir2");
  final FsPermission perm = FsPermission.getFileDefault();

  doReturn(true).when(mockLocalFS).mkdirs(dir, perm);
  doReturn(true).when(mockRemoteFS).mkdirs(dir, perm);

  assertTrue(fs.mkdirs(dir, perm));
}
 
Example 36
Project: hadoop   File: TestEncryptionZones.java   View source code 5 votes vote down vote up
/**
 * Test running fsck on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
  final int fileLen = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");

  // Build an encryption zone containing one file.
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, fileLen, (short) 1, 0xFEED);

  final ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
  final PrintStream printOut = new PrintStream(byteOut, true);

  // fsck from the root must see the zone as healthy.
  int exitCode = ToolRunner.run(new DFSck(conf, printOut),
      new String[]{ "/" });
  assertEquals("Fsck ran with non-zero error code", 0, exitCode);
  String report = byteOut.toString();
  assertTrue("Fsck did not return HEALTHY status",
      report.contains(NamenodeFsck.HEALTHY_STATUS));

  // Run fsck directly on the encryption zone instead of root
  exitCode = ToolRunner.run(new DFSck(conf, printOut),
      new String[]{ zoneParent.toString() });
  assertEquals("Fsck ran with non-zero error code", 0, exitCode);
  report = byteOut.toString();
  assertTrue("Fsck did not return HEALTHY status",
      report.contains(NamenodeFsck.HEALTHY_STATUS));
}
 
Example 37
Project: hadoop   File: WindowsSecureContainerExecutor.java   View source code 5 votes vote down vote up
/**
 * Moves a classpath jar into the container working directory with
 * elevated privileges and hands ownership to the container user.
 */
@Override
public Path localizeClasspathJar(Path classPathJar, Path pwd, String owner)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("localizeClasspathJar: %s %s o:%s",
        classPathJar, pwd, owner));
  }
  // Make sure the working directory exists and is owned by the user.
  createDir(pwd, new FsPermission(DIR_PERM), true, owner);
  final Path destination = new Path(pwd, classPathJar.getName());
  Native.Elevated.move(classPathJar, destination, true);
  Native.Elevated.chown(destination, owner, nodeManagerGroup);
  return destination;
}
 
Example 38
Project: hadoop-oss   File: ViewFileSystemBaseTest.java   View source code 5 votes vote down vote up
/**
 * Verifies that the view-fs root is readable and executable for user,
 * group and other, using either plain or located listStatus.
 */
private void testRootReadableExecutableInternal(boolean located)
    throws IOException {
  // cd to "/" and confirm the working directory really is the root.
  Assert.assertFalse("In root before cd",
      fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd",
      fsView.getWorkingDirectory().isRoot());

  // Root must be listable (i.e. readable).
  verifyRootChildren(listStatusInternal(located,
      fsView.getWorkingDirectory()));

  // Check read + execute bits for each permission class.
  final FileStatus rootStatus =
      fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission rootPerms = rootStatus.getPermission();
  final FsAction userAction = rootPerms.getUserAction();
  final FsAction groupAction = rootPerms.getGroupAction();
  final FsAction otherAction = rootPerms.getOtherAction();

  Assert.assertTrue("User-executable permission not set!",
      userAction.implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!",
      userAction.implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!",
      groupAction.implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!",
      groupAction.implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!",
      otherAction.implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!",
      otherAction.implies(FsAction.READ));
}
 
Example 39
Project: hadoop   File: TestSpeculativeExecution.java   View source code 5 votes vote down vote up
/**
 * Creates a file under TEST_ROOT_DIR on the local file system with the
 * given contents and restricts it to owner-only access (mode 700).
 *
 * @param filename name of the file relative to TEST_ROOT_DIR
 * @param contents data to write (one byte per char, via writeBytes)
 * @return the path of the created file
 * @throws IOException if the file cannot be created or written
 */
private Path createTempFile(String filename, String contents)
    throws IOException {
  Path path = new Path(TEST_ROOT_DIR, filename);
  // try-with-resources: the original leaked the stream when writeBytes threw.
  try (FSDataOutputStream os = localFs.create(path)) {
    os.writeBytes(contents);
  }
  localFs.setPermission(path, new FsPermission("700"));
  return path;
}
 
Example 40
Project: hadoop-oss   File: ChecksumFileSystem.java   View source code 5 votes vote down vote up
/**
 * Creates an output stream for {@code f}, optionally wrapping it so that a
 * matching checksum file is written alongside the data.
 *
 * @param f            file to create
 * @param permission   permission to apply to the new file
 * @param overwrite    whether an existing file may be replaced
 * @param createParent if false, fail when the parent directory is missing
 * @param bufferSize   I/O buffer size
 * @param replication  block replication factor
 * @param blockSize    block size for the new file
 * @param progress     progress callback, may be null
 * @return the opened output stream
 * @throws FileNotFoundException if the parent is missing and
 *         {@code createParent} is false
 * @throws IOException if the parent directories cannot be created
 */
private FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, boolean createParent, int bufferSize,
    short replication, long blockSize,
    Progressable progress) throws IOException {
  Path parent = f.getParent();
  if (parent != null) {
    // Enforce the createParent contract before touching the target file.
    if (!createParent && !exists(parent)) {
      throw new FileNotFoundException("Parent directory doesn't exist: "
          + parent);
    } else if (!mkdirs(parent)) {
      throw new IOException("Mkdirs failed to create " + parent
          + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
          + ")");
    }
  }
  final FSDataOutputStream out;
  if (writeChecksum) {
    // ChecksumFSOutputSummer writes both the data and its checksum file.
    out = new FSDataOutputStream(
        new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
            blockSize, progress, permission), null);
  } else {
    out = fs.create(f, permission, overwrite, bufferSize, replication,
        blockSize, progress);
    // remove the checksum file since we aren't writing one
    Path checkFile = getChecksumFile(f);
    if (fs.exists(checkFile)) {
      fs.delete(checkFile, true);
    }
  }
  return out;
}