Java Code Examples for org.apache.hadoop.fs.FileSystem.setPermission()

The following Java code examples show how to use setPermission() of the org.apache.hadoop.fs.FileSystem class.
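
Before the examples, here is a minimal, self-contained sketch of the call itself (the local filesystem and the /tmp/perm-demo path are illustrative assumptions, not taken from any example below). It shows the four ways of constructing an FsPermission that appear throughout the examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative: any FileSystem (HDFS, local, ...) exposes setPermission()
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/perm-demo");
    fs.mkdirs(path);

    // From an octal short literal
    fs.setPermission(path, new FsPermission((short) 0755));
    // From an octal string
    fs.setPermission(path, new FsPermission("755"));
    // From a (user, group, other) FsAction triple
    fs.setPermission(path,
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE));
    // From a symbolic ls-style string
    fs.setPermission(path, FsPermission.valueOf("-rwxr-xr-x"));
  }
}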
Example 1
Project: hadoop-oss   File: TestCredentialProviderFactory.java
@Test
public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue(s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
 
Example 2
Project: hadoop-oss   File: TestCredentialProviderFactory.java
@Test
public void testLocalJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Unexpected permissions: " + s.getPermission().toString(), s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
 
Example 3
Project: hadoop   File: TestHttpFSFileSystemLocalFileSystem.java
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example 4
Project: circus-train   File: S3MapReduceCpTestUtils.java
public static String createTestSetup(String baseDir, FileSystem fs, FsPermission perm) throws IOException {
  String base = getBase(baseDir);
  fs.mkdirs(new Path(base + "/newTest/hello/world1"));
  fs.mkdirs(new Path(base + "/newTest/hello/world2/newworld"));
  fs.mkdirs(new Path(base + "/newTest/hello/world3/oldworld"));
  fs.setPermission(new Path(base + "/newTest"), perm);
  fs.setPermission(new Path(base + "/newTest/hello"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world1"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world2"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world2/newworld"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world3"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world3/oldworld"), perm);
  createFile(fs, new Path(base, "/newTest/1"));
  createFile(fs, new Path(base, "/newTest/hello/2"));
  createFile(fs, new Path(base, "/newTest/hello/world3/oldworld/3"));
  createFile(fs, new Path(base, "/newTest/hello/world2/4"));
  return base;
}
 
Example 5
Project: hadoop   File: DistCpV1.java
private static void updateDestStatus(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preserved, FileSystem destFileSys
    ) throws IOException {
  String owner = null;
  String group = null;
  if (preserved.contains(FileAttribute.USER)
      && !src.getOwner().equals(dst.getOwner())) {
    owner = src.getOwner();
  }
  if (preserved.contains(FileAttribute.GROUP)
      && !src.getGroup().equals(dst.getGroup())) {
    group = src.getGroup();
  }
  if (owner != null || group != null) {
    destFileSys.setOwner(dst.getPath(), owner, group);
  }
  if (preserved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
  if (preserved.contains(FileAttribute.TIMES)) {
    destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
  }
}
 
Example 6
Project: hadoop   File: TestEncryptedShuffle.java
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is available to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Example 7
Project: hadoop   File: TestStickyBit.java
/**
 * Test basic ability to get and set sticky bits on files and directories.
 */
private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
    throws IOException {
  // Initially sticky bit should not be set
  assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());

  // Same permission, but with sticky bit on
  short withSB;
  withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);

  assertTrue((new FsPermission(withSB)).getStickyBit());

  hdfs.setPermission(p, new FsPermission(withSB));
  assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());

  // Write a file to the fs, try to set its sticky bit
  Path f = new Path(baseDir, "somefile");
  writeFile(hdfs, f);
  assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());

  withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);

  hdfs.setPermission(f, new FsPermission(withSB));

  assertTrue(hdfs.getFileStatus(f).getPermission().getStickyBit());
}
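
As a side note, the OR-with-01000 arithmetic above can be avoided: FsPermission also has a four-argument (user, group, other, stickyBit) constructor in recent Hadoop versions that takes the sticky bit directly. A minimal sketch reusing hdfs and p from the example above:

// Builds rwxrwxrwx plus the sticky bit; the fourth constructor
// argument sets the sticky bit directly instead of OR-ing in 01000.
FsPermission withSticky =
    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
hdfs.setPermission(p, withSticky);
assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());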
 
Example 8
Project: hadoop   File: LogAggregationService.java
private boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
    throws IOException {
  boolean exists = true;
  try {
    FileStatus appDirStatus = fs.getFileStatus(path);
    if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
      fs.setPermission(path, APP_DIR_PERMISSIONS);
    }
  } catch (FileNotFoundException fnfe) {
    exists = false;
  }
  return exists;
}
 
Example 9
Project: dremio-oss   File: TestHiveStorage.java
@Test
public void testCheckHasPermission() throws Exception {
  getSabotContext().getCatalogService().refreshSource(new NamespaceKey("hive"), CatalogService.REFRESH_EVERYTHING_NOW);
  NamespaceService ns = getSabotContext().getNamespaceService(SystemUser.SYSTEM_USERNAME);


  NamespaceKey dataset = new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1"));
  DatasetConfig datasetConfig = ns.getDataset(dataset);
  assertTrue(getSabotContext().getCatalogService().getStoragePlugin("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));

  final Path tableFile = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1/000000_0");
  final Path tableDir = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1");
  final FileSystem localFs = FileSystem.getLocal(new Configuration());

  try {
    // no read on file
    localFs.setPermission(tableFile, new FsPermission(FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE));
    assertFalse(getSabotContext().getCatalogService().getStoragePlugin("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableFile, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }

  try {
    // no exec on dir
    localFs.setPermission(tableDir, new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE));
    assertFalse(getSabotContext().getCatalogService().getStoragePlugin("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }
}
 
Example 10
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveReplicationOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 11
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);
  
  // preserve replication, block size, user, group, permission, 
  // checksum type and timestamps    
  EnumSet<FileAttribute> attributes = 
      DistCpUtils.unpackAttributes(
          DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 12
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveReplicationOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  // Replication shouldn't apply to dirs so this should still be 0 == 0
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Example 13
Project: hadoop   File: TestNonExistentJob.java
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
 
Example 14
Project: hadoop   File: Util.java
/** Create a directory. */
static boolean createNonexistingDirectory(FileSystem fs, Path dir) throws IOException {
  if (fs.exists(dir)) {
    Util.err.println("dir (= " + dir + ") already exists.");
    return false;
  } else if (!fs.mkdirs(dir)) {
    throw new IOException("Cannot create working directory " + dir);
  }
  fs.setPermission(dir, new FsPermission((short)0777));
  return true;
}
 
Example 15
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreservePermissionOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
}
 
Example 16
Project: scheduling-connector-for-hadoop   File: HPCLogAggregateHandler.java
private void createDir(FileSystem fs, Path path, FsPermission fsPerm)
    throws IOException {
  FsPermission dirPerm = new FsPermission(fsPerm);
  fs.mkdirs(path, dirPerm);
  FsPermission umask = FsPermission.getUMask(fs.getConf());
  if (!dirPerm.equals(dirPerm.applyUMask(umask))) {
    fs.setPermission(path, new FsPermission(fsPerm));
  }
}
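
The final example below uses a related convenience for the same umask problem: the static FileSystem.mkdirs(fs, dir, permission) helper creates the directory and then calls setPermission() explicitly, so the requested mode is not filtered through the client umask. A minimal sketch of that idiom (the /tmp/app-logs path is an illustrative assumption):

// Create-then-chmod: the explicit setPermission() inside the static
// helper means the requested mode survives a restrictive umask.
Path logDir = new Path("/tmp/app-logs");
FileSystem.mkdirs(fs, logDir, new FsPermission((short) 0750));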
 
Example 17
Project: hadoop   File: TestCopyFiles.java
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = 
      createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs = 
      USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, userConf);
      }
    });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}