org.apache.hadoop.fs.FsShell Java Examples
The following examples show how to use org.apache.hadoop.fs.FsShell. Each example is taken from an open-source project; the source file and its license are noted above each snippet.
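Before the examples, here is a minimal, self-contained sketch of the usual invocation pattern (the path /tmp/example is a placeholder, not taken from any example below). FsShell implements the Tool interface, so it is typically driven through ToolRunner, which also parses generic options such as -fs and -conf:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);
    try {
      // Equivalent to "hadoop fs -ls /tmp/example"; output goes to stdout,
      // and the return value is the command's exit code (0 on success).
      int exitCode = ToolRunner.run(shell, new String[] {"-ls", "/tmp/example"});
      System.out.println("exit code: " + exitCode);
    } finally {
      shell.close(); // release the underlying FileSystem resources
    }
  }
}

Many of the tests below follow this pattern, either through ToolRunner.run(shell, args) or by calling shell.run(args) directly after shell.setConf(conf).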
Example #1
Source File: TestDFSShell.java From RDFS with Apache License 2.0

public void testLsr() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));

    // lsr as the current user is expected to succeed (exit code 0)
    runLsr(new FsShell(conf), root, 0);

    // make "sub" unreadable, then run lsr as a user with no access to it;
    // the command is expected to fail (-1) but should still list "zzz"
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short) 0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
    final String tmpusername = ugi.getUserName() + "1";
    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
        tmpusername, new String[] {tmpusername});
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
    String results = runLsr(new FsShell(conf), root, -1);
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
Example #2
Source File: TestHadoopArchives.java From hadoop with Apache License 2.0

@Test
public void testRelativePathWitRepl() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  createFile(inputPath, fs, sub1.getName(), "a");
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, "input");
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchiveWithRepl();

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #3
Source File: TestDFSShellGenericOptions.java From hadoop with Apache License 2.0

private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #4
Source File: TestDFSShellGenericOptions.java From hadoop-gpu with Apache License 2.0

private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = new DistributedFileSystem(NameNode.getAddress(namenode),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #5
Source File: TestFsShellPermission.java From hadoop with Apache License 2.0

public void execute(Configuration conf, FileSystem fs) throws Exception {
  fs.mkdirs(new Path(TEST_ROOT));
  createFiles(fs, TEST_ROOT, fileEntries);

  final FsShell fsShell = new FsShell(conf);
  final String deletePath = TEST_ROOT + "/" + deleteEntry.getPath();

  String[] tmpCmdOpts = StringUtils.split(cmdAndOptions);
  ArrayList<String> tmpArray = new ArrayList<String>(Arrays.asList(tmpCmdOpts));
  tmpArray.add(deletePath);
  final String[] cmdOpts = tmpArray.toArray(new String[tmpArray.size()]);

  userUgi.doAs(new PrivilegedExceptionAction<String>() {
    public String run() throws Exception {
      return execCmd(fsShell, cmdOpts);
    }
  });

  boolean deleted = !fs.exists(new Path(deletePath));
  assertEquals(expectedToDelete, deleted);

  deldir(fs, TEST_ROOT);
}
Example #6
Source File: TestSnapshotRename.java From hadoop with Apache License 2.0

@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);

  // too few arguments
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();

  // too many arguments
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
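The test above only exercises the failure paths. For reference, a sketch of the well-formed invocations (the path and snapshot names are placeholders): -renameSnapshot takes exactly three arguments and -deleteSnapshot exactly two.

FsShell shell = new FsShell();
shell.setConf(conf);
// Correct arity: -renameSnapshot <snapshotDir> <oldName> <newName>
int rc = shell.run(new String[] {"-renameSnapshot", "/tmp", "s1", "s2"});
// Correct arity: -deleteSnapshot <snapshotDir> <snapshotName>
rc = shell.run(new String[] {"-deleteSnapshot", "/tmp", "s2"});

Both commands also require that the directory has been made snapshottable and, for -renameSnapshot, that the old snapshot exists.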
Example #7
Source File: TestSnapshotDeletion.java From hadoop with Apache License 2.0

@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);

  // too few arguments
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();

  // too many arguments
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
Example #8
Source File: TestHadoopArchives.java From hadoop with Apache License 2.0

@Test
public void testSingleFile() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  String singleFileName = "a";
  createFile(inputPath, fs, sub1.getName(), singleFileName);
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, sub1.toString());
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(sub1, singleFileName);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #9
Source File: TestHadoopArchives.java From hadoop with Apache License 2.0

@Test
public void testGlobFiles() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  final Path sub2 = new Path(inputPath, "dir2");
  fs.mkdirs(sub1);
  String fileName = "a";
  createFile(inputPath, fs, sub1.getName(), fileName);
  createFile(inputPath, fs, sub2.getName(), fileName);
  createFile(inputPath, fs, sub1.getName(), "b"); // not part of result

  final String glob = "dir{1,2}/a";
  final FsShell shell = new FsShell(conf);
  final List<String> originalPaths = lsr(shell, inputPath.toString(),
      inputPath + "/" + glob);
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(inputPath, glob);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr,
      fullHarPathStr + "/" + glob);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #10
Source File: TestDFSShellGenericOptions.java From big-c with Apache License 2.0

private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #11
Source File: TestFsShellPermission.java From big-c with Apache License 2.0

public void execute(Configuration conf, FileSystem fs) throws Exception {
  fs.mkdirs(new Path(TEST_ROOT));
  createFiles(fs, TEST_ROOT, fileEntries);

  final FsShell fsShell = new FsShell(conf);
  final String deletePath = TEST_ROOT + "/" + deleteEntry.getPath();

  String[] tmpCmdOpts = StringUtils.split(cmdAndOptions);
  ArrayList<String> tmpArray = new ArrayList<String>(Arrays.asList(tmpCmdOpts));
  tmpArray.add(deletePath);
  final String[] cmdOpts = tmpArray.toArray(new String[tmpArray.size()]);

  userUgi.doAs(new PrivilegedExceptionAction<String>() {
    public String run() throws Exception {
      return execCmd(fsShell, cmdOpts);
    }
  });

  boolean deleted = !fs.exists(new Path(deletePath));
  assertEquals(expectedToDelete, deleted);

  deldir(fs, TEST_ROOT);
}
Example #12
Source File: TestSnapshotDeletion.java From big-c with Apache License 2.0

@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);

  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();

  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
Example #13
Source File: TestSnapshotRename.java From big-c with Apache License 2.0

@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);

  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();

  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
Example #14
Source File: TestOzoneFsHAURLs.java From hadoop-ozone with Apache License 2.0

/**
 * Helper function for testOtherDefaultFS(); runs "fs -ls o3fs:///"
 * against a given fs.defaultFS input.
 *
 * @param defaultFS desired fs.defaultFS to be used in the test
 * @throws Exception
 */
private void testWithDefaultFS(String defaultFS) throws Exception {
  OzoneConfiguration clientConf = new OzoneConfiguration(conf);
  clientConf.setQuietMode(false);
  clientConf.set(o3fsImplKey, o3fsImplValue);
  // fs.defaultFS = file:///
  clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      defaultFS);

  FsShell shell = new FsShell(clientConf);
  try {
    // Test case: ozone fs -ls o3fs:///
    // Expectation: Fail. fs.defaultFS is not a qualified o3fs URI.
    int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"});
    Assert.assertEquals(-1, res);
  } finally {
    shell.close();
  }
}
Example #15
Source File: TestDFSShellGenericOptions.java From RDFS with Apache License 2.0

private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = new DistributedFileSystem(NameNode.getAddress(namenode),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #16
Source File: Pig.java From spork with Apache License 2.0

/**
 * Run a filesystem command. Any output from this command is written to
 * stdout or stderr as appropriate.
 * @param cmd Filesystem command to run along with its arguments as one
 *            string.
 * @throws IOException
 */
public static int fs(String cmd) throws IOException {
  ScriptPigContext ctx = getScriptContext();
  FsShell shell = new FsShell(ConfigurationUtil.toConfiguration(ctx
      .getPigContext().getProperties()));
  int code = -1;
  if (cmd != null) {
    String[] cmdTokens = cmd.split("\\s+");
    if (!cmdTokens[0].startsWith("-")) cmdTokens[0] = "-" + cmdTokens[0];
    try {
      code = shell.run(cmdTokens);
    } catch (Exception e) {
      throw new IOException("Run filesystem command failed", e);
    }
  }
  return code;
}
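As a usage sketch, a caller can invoke this helper directly; the command string is split on whitespace and a leading dash is prepended to the command name if missing, so "mkdir /tmp/out" and "-mkdir /tmp/out" are equivalent (the path /tmp/out is a placeholder, and the snippet assumes a ScriptPigContext has already been set up):

// Hypothetical caller inside an embedded Pig script.
int rc = Pig.fs("mkdir /tmp/out"); // same as Pig.fs("-mkdir /tmp/out")
if (rc != 0) {
  throw new IOException("fs command failed with exit code " + rc);
}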
Example #17
Source File: TestDFSShell.java From hadoop-gpu with Apache License 2.0

public void testLsr() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));

    runLsr(new FsShell(conf), root, 0);

    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short) 0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
    final String tmpusername = ugi.getUserName() + "1";
    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
        tmpusername, new String[] {tmpusername});
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
    String results = runLsr(new FsShell(conf), root, -1);
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
Example #18
Source File: TestHadoopArchives.java From big-c with Apache License 2.0

@Test
public void testGlobFiles() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  final Path sub2 = new Path(inputPath, "dir2");
  fs.mkdirs(sub1);
  String fileName = "a";
  createFile(inputPath, fs, sub1.getName(), fileName);
  createFile(inputPath, fs, sub2.getName(), fileName);
  createFile(inputPath, fs, sub1.getName(), "b"); // not part of result

  final String glob = "dir{1,2}/a";
  final FsShell shell = new FsShell(conf);
  final List<String> originalPaths = lsr(shell, inputPath.toString(),
      inputPath + "/" + glob);
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(inputPath, glob);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr,
      fullHarPathStr + "/" + glob);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #19
Source File: TestHadoopArchives.java From big-c with Apache License 2.0

@Test
public void testSingleFile() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  String singleFileName = "a";
  createFile(inputPath, fs, sub1.getName(), singleFileName);
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, sub1.toString());
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(sub1, singleFileName);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #20
Source File: TestHadoopArchives.java From big-c with Apache License 2.0

@Test
public void testPathWithSpaces() throws Exception {
  // create files/directories with spaces
  createFile(inputPath, fs, "c c");
  final Path sub1 = new Path(inputPath, "sub 1");
  fs.mkdirs(sub1);
  createFile(sub1, fs, "file x y z");
  createFile(sub1, fs, "file");
  createFile(sub1, fs, "x");
  createFile(sub1, fs, "y");
  createFile(sub1, fs, "z");
  final Path sub2 = new Path(inputPath, "sub 1 with suffix");
  fs.mkdirs(sub2);
  createFile(sub2, fs, "z");

  final FsShell shell = new FsShell(conf);
  final String inputPathStr = inputPath.toUri().getPath();
  final List<String> originalPaths = lsr(shell, inputPathStr);

  // make the archive:
  final String fullHarPathStr = makeArchive();

  // compare results
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #21
Source File: TestHadoopArchives.java From big-c with Apache License 2.0

@Test
public void testRelativePathWitRepl() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  createFile(inputPath, fs, sub1.getName(), "a");
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, "input");
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchiveWithRepl();

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #22
Source File: TestHadoopArchives.java From big-c with Apache License 2.0

@Test
public void testRelativePath() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  createFile(inputPath, fs, sub1.getName(), "a");
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, "input");
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive();

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #23
Source File: TestXAttrWithSnapshot.java From big-c with Apache License 2.0

/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test(timeout = 120000)
public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  Path snapshotCopy = new Path(path.toString() + "-copy");
  String[] argv = new String[] { "-cp", "-px",
      snapshotPath.toUri().toString(), snapshotCopy.toUri().toString() };
  int ret = ToolRunner.run(new FsShell(conf), argv);
  assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);

  Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
  assertArrayEquals(value1, xattrs.get(name1));
  assertArrayEquals(value2, xattrs.get(name2));
}
Example #24
Source File: TestDFSShell.java From hadoop-gpu with Apache License 2.0

/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
    throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    // first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
        fs.getFileStatus(dir).getPermission().toString());

    // create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    // test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
        fs.getFileStatus(file).getPermission().toString());

    // test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
        fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
        fs.getFileStatus(file).getPermission().toString());

    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {
    }
  }
}
Example #25
Source File: TestUnderReplicatedBlocks.java From hadoop-gpu with Apache License 2.0

public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new Configuration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster =
      new MiniDFSCluster(conf, REPLICATION_FACTOR + 1, true, null);
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    FSNamesystem namesystem = cluster.getNameNode().namesystem;
    Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
    namesystem.addToInvalidates(b, dn);
    namesystem.blocksMap.removeNode(b, dn);

    // increment this file's replication factor
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[] {
        "-setrep", "-w", Integer.toString(1 + REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
}
Example #26
Source File: TestFsShellPermission.java From hadoop with Apache License 2.0

static String execCmd(FsShell shell, final String[] args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  // redirect stdout while the command runs, then restore it
  System.setOut(out);
  int ret = shell.run(args);
  out.close();
  System.setOut(old);
  // note: returns the exit code as a string, not the captured output
  return String.valueOf(ret);
}
Example #27
Source File: TestCopyFiles.java From big-c with Apache License 2.0

static String execCmd(FsShell shell, String... args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  System.setOut(out);
  shell.run(args);
  out.close();
  System.setOut(old);
  return baout.toString();
}
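Note the contrast with Example #26: that variant returns the exit code as a string, while this one discards the exit code and returns the captured console output. A brief usage sketch of this version (the shell setup mirrors the surrounding tests, and "/" is a placeholder path):

FsShell shell = new FsShell(conf);
// Capture what "-ls /" would have printed to stdout.
String listing = execCmd(shell, "-ls", "/");
System.out.println("captured: " + listing);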
Example #28
Source File: Gridmix.java From big-c with Apache License 2.0

/**
 * Write random bytes at the path <inputDir> if needed.
 * @see org.apache.hadoop.mapred.gridmix.GenerateData
 * @return exit status
 */
@SuppressWarnings("deprecation")
protected int writeInputData(long genbytes, Path inputDir)
    throws IOException, InterruptedException {
  if (genbytes > 0) {
    final Configuration conf = getConf();

    if (inputDir.getFileSystem(conf).exists(inputDir)) {
      LOG.error("Gridmix input data directory " + inputDir
          + " already exists when -generate option is used.\n");
      return STARTUP_FAILED_ERROR;
    }

    // configure the compression ratio if needed
    CompressionEmulationUtil.setupDataGeneratorConfig(conf);

    final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
    LOG.info("Generating " + StringUtils.humanReadableInt(genbytes)
        + " of test data...");
    launchGridmixJob(genData);

    FsShell shell = new FsShell(conf);
    try {
      LOG.info("Changing the permissions for inputPath " + inputDir.toString());
      shell.run(new String[] {"-chmod", "-R", "777", inputDir.toString()});
    } catch (Exception e) {
      LOG.error("Couldnt change the file permissions ", e);
      throw new IOException(e);
    }
    LOG.info("Input data generation successful.");
  }
  return 0;
}
Example #29
Source File: TestDFSShell.java From RDFS with Apache License 2.0

/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
    throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    // first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
        fs.getFileStatus(dir).getPermission().toString());

    // create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    // test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
        fs.getFileStatus(file).getPermission().toString());

    // test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
        fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
        fs.getFileStatus(file).getPermission().toString());

    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {
    }
  }
}
Example #30
Source File: CommandExecutor.java From RDFS with Apache License 2.0

public static int executeFSCommand(final String cmd, final String namenode) {
  exitCode = 0;

  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  PrintStream origOut = System.out;
  PrintStream origErr = System.err;

  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));

  FsShell shell = new FsShell();
  String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode);
  cmdExecuted = cmd;

  try {
    ToolRunner.run(shell, args);
  } catch (Exception e) {
    e.printStackTrace();
    lastException = e;
    exitCode = -1;
  } finally {
    System.setOut(origOut);
    System.setErr(origErr);
  }

  commandOutput = bao.toString();
  return exitCode;
}
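A hedged usage sketch (the URI is a placeholder; getCommandAsArgs, which substitutes the NAMENODE token and tokenizes the command, is not shown in this excerpt):

// Hypothetical invocation: run "-ls /" against the given namenode.
int code = CommandExecutor.executeFSCommand("-fs NAMENODE -ls /",
    "hdfs://localhost:9000");
// The command's console output, last exception, and exit code are kept in
// the class's static fields (commandOutput, lastException, exitCode).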