Java Code Examples for org.apache.hadoop.fs.permission.FsPermission

The following examples show how to use org.apache.hadoop.fs.permission.FsPermission. They are extracted from open source projects; the source project, source file, and license are noted above each example.
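
As a quick orientation, here is a minimal, self-contained sketch of the most common ways to construct an FsPermission. It uses only standard constructors and factory methods that also appear in the examples below (octal shorts, FsAction triples, the built-in defaults, and applyUMask):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) {
    // From an octal short: 0644 -> rw-r--r--
    FsPermission fromOctal = new FsPermission((short) 0644);

    // From explicit user/group/other actions: 750 -> rwxr-x---
    FsPermission fromActions =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);

    // Built-in defaults (666 for files, 777 for directories, before the umask)
    FsPermission fileDefault = FsPermission.getFileDefault();
    FsPermission dirDefault = FsPermission.getDirDefault();

    // Apply a umask the way FileSystem implementations do at create time
    FsPermission masked = fileDefault.applyUMask(new FsPermission((short) 022));

    System.out.println(fromOctal);   // rw-r--r--
    System.out.println(fromActions); // rwxr-x---
    System.out.println(dirDefault);  // rwxrwxrwx
    System.out.println(masked);      // rw-r--r--
  }
}
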
Example 1
Source Project: hadoop-ozone   Source File: TestOzoneFileInterfaces.java    License: Apache License 2.0
/**
 * Verify that a directory exists and is initialized correctly.
 * @param path path of the directory
 * @return null if the path is not found, otherwise the FileStatus
 * @throws IOException on other I/O errors
 */
private FileStatus getDirectoryStat(Path path) throws IOException {

  FileStatus status = null;

  try {
    status = fs.getFileStatus(path);
  } catch (FileNotFoundException e) {
    return null;
  }
  assertTrue("The created path is not directory.", status.isDirectory());

  assertEquals(FsPermission.getDirDefault(), status.getPermission());
  verifyOwnerGroup(status);

  assertEquals(0, status.getLen());

  return status;
}
 
Example 2
Source Project: big-c   Source File: PBHelper.java    License: Apache License 2.0
public static CachePoolInfo convert(CachePoolInfoProto proto) {
  // Pool name is a required field, the rest are optional
  String poolName = checkNotNull(proto.getPoolName());
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (proto.hasOwnerName()) {
    info.setOwnerName(proto.getOwnerName());
  }
  if (proto.hasGroupName()) {
    info.setGroupName(proto.getGroupName());
  }
  if (proto.hasMode()) {
    info.setMode(new FsPermission((short)proto.getMode()));
  }
  if (proto.hasLimit()) {
    info.setLimit(proto.getLimit());
  }
  if (proto.hasMaxRelativeExpiry()) {
    info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
  }
  return info;
}
 
Example 3
Source Project: RDFS   Source File: DistCh.java    License: Apache License 2.0
/**
 * path:owner:group:permission
 * e.g.
 * /user/foo:foo:bar:700 
 */
FileOperation(String line) {
  try {
    String[] t = line.split(":", 4);
    for(int i = 0; i < t.length; i++) {
      if ("".equals(t[i])) {
        t[i] = null;
      }
    }

    src = new Path(t[0]);
    owner = t[1];
    group = t[2];
    permission = t[3] == null ? null :
        new FsPermission(Short.parseShort(t[3], 8));

    checkState();
  }
  catch(Exception e) {
    throw (IllegalArgumentException)new IllegalArgumentException(
        "line=" + line).initCause(e);
  }
}
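
Example 3 parses the fourth field with radix 8, so the text "700" denotes the octal mode 0700. A minimal sketch of that equivalence:

import org.apache.hadoop.fs.permission.FsPermission;

public class OctalParseSketch {
  public static void main(String[] args) {
    // Short.parseShort("700", 8) == 448 == 0700
    FsPermission parsed = new FsPermission(Short.parseShort("700", 8));
    FsPermission literal = new FsPermission((short) 0700);
    System.out.println(parsed);                 // rwx------
    System.out.println(parsed.equals(literal)); // true
  }
}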
 
Example 4
Source Project: hadoop   Source File: NativeAzureFileSystem.java    License: Apache License 2.0
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission().
      equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
 
Example 5
Source Project: hadoop   Source File: FSAclBaseTest.java    License: Apache License 2.0
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, MASK, READ_EXECUTE),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short)010750);
  assertAclFeature(true);
}
 
Example 6
Source Project: hadoop   Source File: TestDFSPermission.java    License: Apache License 2.0
private void createAndCheckPermission(OpType op, Path name, short umask,
    FsPermission permission, boolean delete) throws Exception {
  // create the file/directory
  create(op, name, umask, permission);

  // get the short form of the permission
  short permissionNum = (DEFAULT_PERMISSION.equals(permission)) ? MAX_PERMISSION
      : permission.toShort();

  // get the expected permission; the umask is applied the same way
  // whether the op is CREATE or MKDIRS
  short expectedPermission = (short) (~umask & permissionNum);

  // check if permission is correctly set
  checkPermission(name, expectedPermission, delete);
}
 
Example 7
Source Project: big-c   Source File: S3AFileSystem.java    License: Apache License 2.0
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the file name to open
 * @param permission the permission to set on the created file
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize the requested block size.
 * @param progress the progress reporter.
 * @throws IOException on IO failure.
 * @see #setPermission(Path, FsPermission)
 */
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  String key = pathToKey(f);

  if (!overwrite && exists(f)) {
    throw new FileAlreadyExistsException(f + " already exists");
  }
  if (getConf().getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD)) {
    return new FSDataOutputStream(new S3AFastOutputStream(s3, this, bucket,
        key, progress, statistics, cannedACL,
        serverSideEncryptionAlgorithm, partSize, (long)multiPartThreshold,
        threadPoolExecutor), statistics);
  }
  // We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
  return new FSDataOutputStream(new S3AOutputStream(getConf(), transfers, this,
    bucket, key, progress, cannedACL, statistics, 
    serverSideEncryptionAlgorithm), null);
}
 
Example 8
Source Project: distributedlog   Source File: DLFileSystem.java    License: Apache License 2.0
@Override
public FSDataOutputStream create(Path path,
                                 FsPermission fsPermission,
                                 boolean overwrite,
                                 int bufferSize,
                                 short replication,
                                 long blockSize,
                                 Progressable progressable) throws IOException {
    // for overwrite, delete the existing file first.
    if (overwrite) {
        delete(path, false);
    }

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    confLocal.setEnsembleSize(replication);
    confLocal.setWriteQuorumSize(replication);
    confLocal.setAckQuorumSize(replication);
    confLocal.setMaxLogSegmentBytes(blockSize);
    return append(path, bufferSize, Optional.of(confLocal));
}
 
Example 9
Source Project: hadoop   Source File: DefaultContainerExecutor.java    License: Apache License 2.0
/**
 * Initialize the local directories for a particular user.
 * <ul>
 * <li>$local.dir/usercache/$user</li>
 * </ul>
 */
void createUserLocalDirs(List<String> localDirs, String user)
    throws IOException {
  boolean userDirStatus = false;
  FsPermission userperms = new FsPermission(USER_PERM);
  for (String localDir : localDirs) {
    // create $local.dir/usercache/$user and its immediate parent
    try {
      createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user);
    } catch (IOException e) {
      LOG.warn("Unable to create the user directory : " + localDir, e);
      continue;
    }
    userDirStatus = true;
  }
  if (!userDirStatus) {
    throw new IOException("Not able to initialize user directories "
        + "in any of the configured local directories for user " + user);
  }
}
 
Example 10
Source Project: big-c   Source File: TestJsonUtil.java    License: Apache License 2.0
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
 
Example 11
Source Project: nifi   Source File: PutHDFSTest.java    License: Apache License 2.0
@Test
public void testPutFilePermissionsWithNoConfiguredUmask() throws IOException {
    // assert the file permission fallback works. It should read FsPermission.DEFAULT_UMASK
    MockFileSystem fileSystem = new MockFileSystem();
    PutHDFS proc = new TestablePutHDFS(kerberosProperties, fileSystem);
    TestRunner runner = TestRunners.newTestRunner(proc);
    runner.setProperty(PutHDFS.DIRECTORY, "target/test-classes");
    runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace");
    // invoke the abstractOnScheduled method so the Hadoop configuration is available to apply to the MockFileSystem instance
    proc.abstractOnScheduled(runner.getProcessContext());
    fileSystem.setConf(proc.getConfiguration());
    try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1");
        runner.enqueue(fis, attributes);
        runner.run();
    }
    assertEquals(FsPermission.getFileDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK)),
        fileSystem.getFileStatus(new Path("target/test-classes/randombytes-1")).getPermission());
}
 
Example 12
Source Project: big-c   Source File: TestFavoredNodesEndToEnd.java    License: Apache License 2.0
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // create 10 files with random preferred nodes
  for (int i = 0; i < NUM_FILES; i++) {
    Random rand = new Random(System.currentTimeMillis() + i);
    // pass a newly created rand so as to get a uniform distribution each time
    // without too many collisions (look at the do-while loop in getDatanodes)
    InetSocketAddress[] datanode = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // create and close the file.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    // re-open for append
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // verify the files got created in the right nodes
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
 
Example 13
/** @throws Exception If failed. */
@Test
public void testDeleteRecursively() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");

    assertTrue(fs.delete(someDir2, true));

    assertPathDoesNotExist(fs, someDir2);
    assertPathDoesNotExist(fs, someDir3);
}
 
Example 14
Source Project: datacollector   Source File: AzureFile.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
public Map<String, Object> getFileMetadata() {
  Map<String, Object>  metadata = new HashMap<>();
  metadata.put(HeaderAttributeConstants.FILE_NAME, status.getPath().getName());
  metadata.put(HeaderAttributeConstants.FILE, status.getPath().toUri().getPath());
  metadata.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, status.getModificationTime());
  metadata.put(HeaderAttributeConstants.LAST_ACCESS_TIME, status.getAccessTime());
  metadata.put(HeaderAttributeConstants.IS_DIRECTORY, status.isDirectory());
  metadata.put(HeaderAttributeConstants.IS_SYMBOLIC_LINK, status.isSymlink());
  metadata.put(HeaderAttributeConstants.SIZE, status.getLen());
  metadata.put(HeaderAttributeConstants.OWNER, status.getOwner());
  metadata.put(HeaderAttributeConstants.GROUP, status.getGroup());
  metadata.put(HeaderAttributeConstants.BLOCK_SIZE, status.getBlockSize());
  metadata.put(HeaderAttributeConstants.REPLICATION, status.getReplication());
  metadata.put(HeaderAttributeConstants.IS_ENCRYPTED, status.isEncrypted());

  FsPermission permission = status.getPermission();
  if (permission != null) {
    metadata.put(PERMISSIONS, permission.toString());
  }

  return metadata;
}
 
Example 15
Source Project: ignite   Source File: HadoopIgfsProperties.java    License: Apache License 2.0
/**
 * Constructor.
 *
 * @param props Properties.
 * @throws IgniteException In case of error.
 */
public HadoopIgfsProperties(Map<String, String> props) throws IgniteException {
    if (props == null)
        return;

    usrName = props.get(IgfsUtils.PROP_USER_NAME);
    grpName = props.get(IgfsUtils.PROP_GROUP_NAME);

    String permStr = props.get(IgfsUtils.PROP_PERMISSION);

    if (permStr != null) {
        try {
            perm = new FsPermission((short)Integer.parseInt(permStr, 8));
        }
        catch (NumberFormatException ignore) {
            throw new IgniteException("Permissions cannot be parsed: " + permStr);
        }
    }
}
 
Example 16
Source Project: big-c   Source File: TestEditLog.java    License: Apache License 2.0
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
 
Example 17
Source Project: hadoop   Source File: TestListFilesInFileContext.java    License: Apache License 2.0
/** Test when input path is a file */
@Test
public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);

  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
      FILE1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  
  itor = fc.util().listFiles(FILE1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
}
 
Example 18
Source Project: big-c   Source File: RawLocalFileSystem.java    License: Apache License 2.0
private LocalFSFileOutputStream(Path f, boolean append,
    FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (permission == null) {
    this.fos = new FileOutputStream(file, append);
  } else {
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
          append, permission.toShort());
    } else {
      this.fos = new FileOutputStream(file, append);
      boolean success = false;
      try {
        setPermission(f, permission);
        success = true;
      } finally {
        if (!success) {
          IOUtils.cleanup(LOG, this.fos);
        }
      }
    }
  }
}
 
Example 19
Source Project: big-c   Source File: TestGlobPaths.java    License: Apache License 2.0
void run() throws Exception {
  // Verify that the default scheme is hdfs, when we don't supply one.
  wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
      false);
  wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
      + "/alphaLink"), false);
  FileStatus[] statuses = wrap.globStatus(
      new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
  Assert.assertEquals(1, statuses.length);
  Path path = statuses[0].getPath();
  Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
  Assert.assertEquals("hdfs", path.toUri().getScheme());

  // FileContext can list a file:/// URI.
  // Since everyone should have the root directory, we list that.
  statuses = fc.util().globStatus(new Path("file:///"),
      new AcceptAllPathFilter());
  Assert.assertEquals(1, statuses.length);
  Path filePath = statuses[0].getPath();
  Assert.assertEquals("file", filePath.toUri().getScheme());
  Assert.assertEquals("/", filePath.toUri().getPath());

  // The FileSystem should have scheme 'hdfs'
  Assert.assertEquals("hdfs", fs.getScheme());
}
 
Example 20
Source Project: hadoop   Source File: TestEditLog.java    License: Apache License 2.0
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
 
Example 21
Source Project: big-c   Source File: FSPermissionChecker.java    License: Apache License 2.0
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
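
The checks above rely on FsAction.implies(): one action implies another when its permission bits are a superset of the other's. A short illustration:

import org.apache.hadoop.fs.permission.FsAction;

public class ImpliesSketch {
  public static void main(String[] args) {
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ));    // true
    System.out.println(FsAction.READ_EXECUTE.implies(FsAction.WRITE)); // false
    System.out.println(FsAction.ALL.implies(FsAction.READ_EXECUTE));   // true
  }
}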
 
Example 22
Source Project: hadoop   Source File: TestDistCacheEmulation.java    License: Apache License 2.0
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load.
 */
@Test(timeout = 2000)
public void testDistCacheEmulationConfigurability() throws IOException {
  Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
  Path ioPath = new Path("testDistCacheEmulationConfigurability")
      .makeQualified(GridmixTestUtils.dfs.getUri(),
          GridmixTestUtils.dfs.getWorkingDirectory());
  FileSystem fs = FileSystem.get(jobConf);
  FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));

  // default config
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertTrue("Default configuration of "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " is wrong.", dce.shouldEmulateDistCacheLoad());

  // config property set to false
  jobConf.setBoolean(
      DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertFalse("Disabling of emulation of distributed cache load by setting "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
 
Example 23
/** @throws Exception If failed. */
@Test
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    Path root = new Path(fsHome, "/");

    assertFalse(fs.delete(root, true));

    assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));

    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}
 
Example 24
@Override
public FsPermission getFsPermission(
    INodeAuthorizationInfo node, int snapshotId) {
  FsPermission permission;
  String[] pathElements = getPathElements(node);
  if (!isSentryManaged(pathElements)) {
    permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
  } else {
    FsPermission returnPerm = this.permission;
    // Handle the case when the prefix directory is itself associated with an
    // authorizable object (the default db directory in Hive). An executable
    // permission needs to be set on the prefix directory in this case;
    // otherwise, subdirectories (which map to other dbs) will not be
    // traversable.
    for (String [] prefixPath : authzInfo.getPathPrefixes()) {
      if (Arrays.equals(prefixPath, pathElements)) {
        returnPerm = FsPermission.createImmutable((short)(returnPerm.toShort() | 0x01));
        break;
      }
    }
    permission = returnPerm;
  }
  return permission;
}
 
Example 25
Source Project: ranger   Source File: HDFSRangerTest.java    License: Apache License 2.0
void createFile(String baseDir, Integer index) throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    String dirName = baseDir + (index != null ? String.valueOf(index) : "");
    String fileName = dirName + "/dummy-data";
    final Path file = new Path(fileName);
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
}
 
Example 26
Source Project: RDFS   Source File: NNThroughputBenchmark.java    License: Apache License 2.0
/**
 * Do file create.
 */
long executeOp(int daemonId, int inputIdx, String clientName)
    throws IOException {
  long start = System.currentTimeMillis();
  // dummyActionNoSynch(fileIdx);
  nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                  clientName, true, true, replication, BLOCK_SIZE);
  long end = System.currentTimeMillis();
  // loop on complete() until the NameNode reports the file closed
  for (boolean written = !closeUponCreate; !written;
      written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
  return end-start;
}
 
Example 27
Source Project: hadoop   Source File: ViewFileSystemBaseTest.java    License: Apache License 2.0
@Test
public void testRootReadableExecutable() throws IOException {
  // verify executable permission on root: cd /
  //
  Assert.assertFalse("In root before cd",
      fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd",
    fsView.getWorkingDirectory().isRoot());

  // verify readable
  //
  verifyRootChildren(fsView.listStatus(fsView.getWorkingDirectory()));

  // verify permissions
  //
  final FileStatus rootStatus =
      fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission perms = rootStatus.getPermission();

  Assert.assertTrue("User-executable permission not set!",
      perms.getUserAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!",
      perms.getUserAction().implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!",
      perms.getGroupAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!",
      perms.getGroupAction().implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!",
      perms.getOtherAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!",
      perms.getOtherAction().implies(FsAction.READ));
}
 
Example 28
Source Project: big-c   Source File: TestHdfsCryptoStreams.java    License: Apache License 2.0
@Before
@Override
public void setUp() throws IOException {
  ++pathCount;
  path = new Path("/p" + pathCount);
  file = new Path(path, "file");
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0700));

  super.setUp();
}
 
Example 29
@Test
public void testMkdirs_shouldReturnDefaultFilePermissions()
    throws IOException, URISyntaxException {
  Configuration config = GoogleHadoopFileSystemIntegrationHelper.getTestConfig();
  config.set("fs.gs.reported.permissions", "357");
  GoogleHadoopFS ghfs = new GoogleHadoopFS(initUri, config);

  FsPermission permission = new FsPermission("000");
  FsPermission expectedPermission = new FsPermission("357");

  Path path = new Path(initUri.resolve("/testMkdirs_shouldRespectFilePermissions").toString());
  ghfs.mkdir(path, permission, /* createParent= */ true);

  assertThat(ghfs.getFileStatus(path).getPermission()).isEqualTo(expectedPermission);
}
 
Example 30
Source Project: jsr203-hadoop   Source File: HadoopPosixFileAttributes.java    License: Apache License 2.0
public HadoopPosixFileAttributes(HadoopFileSystem hdfs, Object fileKey,
    FileStatus fileStatus) throws IOException {
  super(fileKey, fileStatus);
  this.owner = hdfs.getUserPrincipalLookupService()
      .lookupPrincipalByGroupName(fileStatus.getOwner());
  this.group = hdfs.getUserPrincipalLookupService()
      .lookupPrincipalByGroupName(fileStatus.getGroup());
  FsPermission fsPermission = getFileStatus().getPermission();
  String perms = fsPermission.getUserAction().SYMBOL
      + fsPermission.getGroupAction().SYMBOL
      + fsPermission.getOtherAction().SYMBOL;
  this.permissions = PosixFilePermissions.fromString(perms);
}