org.apache.hadoop.fs.FileSystemTestHelper Java Examples
The following examples show how to use
org.apache.hadoop.fs.FileSystemTestHelper.
You can vote up the ones you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: BaseTestHttpFSWith.java From hadoop with Apache License 2.0 | 6 votes |
private void testTruncate() throws Exception { if (!isLocalFS()) { final short repl = 3; final int blockSize = 1024; final int numOfBlocks = 2; FileSystem fs = FileSystem.get(getProxiedFSConf()); fs.mkdirs(getProxiedFSTestDir()); Path file = new Path(getProxiedFSTestDir(), "foo.txt"); final byte[] data = FileSystemTestHelper.getFileData( numOfBlocks, blockSize); FileSystemTestHelper.createFile(fs, file, data, blockSize, repl); final int newLength = blockSize; boolean isReady = fs.truncate(file, newLength); Assert.assertTrue("Recovery is not expected.", isReady); FileStatus fileStatus = fs.getFileStatus(file); Assert.assertEquals(fileStatus.getLen(), newLength); AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString()); fs.close(); } }
Example #2
Source File: TestReservedRawPaths.java From big-c with Apache License 2.0 | 6 votes |
@Before public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); File testRootDir = new File(testRoot).getAbsoluteFile(); final Path jksPath = new Path(testRootDir.toString(), "test.jks"); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri() ); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem()); fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); // Need to set the client's KeyProvider to the NN's for JKS, // else the updates do not get flushed properly fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem() .getProvider()); DFSTestUtil.createKey(TEST_KEY, cluster, conf); }
Example #3
Source File: TestEncryptionZones.java From big-c with Apache License 2.0 | 6 votes |
@Before public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI()); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); // Lower the batch size for testing conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(fs); fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); setProvider(); // Create a test key DFSTestUtil.createKey(TEST_KEY, cluster, conf); }
Example #4
Source File: BaseTestHttpFSWith.java From big-c with Apache License 2.0 | 6 votes |
private void testTruncate() throws Exception { if (!isLocalFS()) { final short repl = 3; final int blockSize = 1024; final int numOfBlocks = 2; FileSystem fs = FileSystem.get(getProxiedFSConf()); fs.mkdirs(getProxiedFSTestDir()); Path file = new Path(getProxiedFSTestDir(), "foo.txt"); final byte[] data = FileSystemTestHelper.getFileData( numOfBlocks, blockSize); FileSystemTestHelper.createFile(fs, file, data, blockSize, repl); final int newLength = blockSize; boolean isReady = fs.truncate(file, newLength); Assert.assertTrue("Recovery is not expected.", isReady); FileStatus fileStatus = fs.getFileStatus(file); Assert.assertEquals(fileStatus.getLen(), newLength); AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString()); fs.close(); } }
Example #5
Source File: TestChRootedFileSystem.java From hadoop with Apache License 2.0 | 6 votes |
@Before public void setUp() throws Exception { // create the test root on local_fs Configuration conf = new Configuration(); fSysTarget = FileSystem.getLocal(conf); fileSystemTestHelper = new FileSystemTestHelper(); chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget); // In case previous test was killed before cleanup fSysTarget.delete(chrootedTo, true); fSysTarget.mkdirs(chrootedTo); // ChRoot to the root of the testDirectory fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf); }
Example #6
Source File: ViewFileSystemBaseTest.java From big-c with Apache License 2.0 | 6 votes |
@Before public void setUp() throws Exception { initializeTargetTestRoot(); // Make user and data dirs - we creates links to them in the mount table fsTarget.mkdirs(new Path(targetTestRoot,"user")); fsTarget.mkdirs(new Path(targetTestRoot,"data")); fsTarget.mkdirs(new Path(targetTestRoot,"dir2")); fsTarget.mkdirs(new Path(targetTestRoot,"dir3")); FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile")); // Now we use the mount fs to set links to user and dir // in the test root // Set up the defaultMT in the config with our mount point links conf = ViewFileSystemTestSetup.createConfig(); setupMountPoints(); fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf); }
Example #7
Source File: ViewFileSystemBaseTest.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testGetBlockLocations() throws IOException { Path targetFilePath = new Path(targetTestRoot,"data/largeFile"); FileSystemTestHelper.createFile(fsTarget, targetFilePath, 10, 1024); Path viewFilePath = new Path("/data/largeFile"); Assert.assertTrue("Created File should be type File", fsView.isFile(viewFilePath)); BlockLocation[] viewBL = fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath), 0, 10240+100); Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length); BlockLocation[] targetBL = fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath), 0, 10240+100); compareBLs(viewBL, targetBL); // Same test but now get it via the FileStatus Parameter fsView.getFileBlockLocations( fsView.getFileStatus(viewFilePath), 0, 10240+100); targetBL = fsTarget.getFileBlockLocations( fsTarget.getFileStatus(targetFilePath), 0, 10240+100); compareBLs(viewBL, targetBL); }
Example #8
Source File: ViewFileSystemBaseTest.java From hadoop with Apache License 2.0 | 6 votes |
@Before public void setUp() throws Exception { initializeTargetTestRoot(); // Make user and data dirs - we creates links to them in the mount table fsTarget.mkdirs(new Path(targetTestRoot,"user")); fsTarget.mkdirs(new Path(targetTestRoot,"data")); fsTarget.mkdirs(new Path(targetTestRoot,"dir2")); fsTarget.mkdirs(new Path(targetTestRoot,"dir3")); FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile")); // Now we use the mount fs to set links to user and dir // in the test root // Set up the defaultMT in the config with our mount point links conf = ViewFileSystemTestSetup.createConfig(); setupMountPoints(); fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf); }
Example #9
Source File: ViewFileSystemBaseTest.java From big-c with Apache License 2.0 | 6 votes |
@Test public void testGetBlockLocations() throws IOException { Path targetFilePath = new Path(targetTestRoot,"data/largeFile"); FileSystemTestHelper.createFile(fsTarget, targetFilePath, 10, 1024); Path viewFilePath = new Path("/data/largeFile"); Assert.assertTrue("Created File should be type File", fsView.isFile(viewFilePath)); BlockLocation[] viewBL = fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath), 0, 10240+100); Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length); BlockLocation[] targetBL = fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath), 0, 10240+100); compareBLs(viewBL, targetBL); // Same test but now get it via the FileStatus Parameter fsView.getFileBlockLocations( fsView.getFileStatus(viewFilePath), 0, 10240+100); targetBL = fsTarget.getFileBlockLocations( fsTarget.getFileStatus(targetFilePath), 0, 10240+100); compareBLs(viewBL, targetBL); }
Example #10
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 | 6 votes |
@Before public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI()); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); // Lower the batch size for testing conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(fs); fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); setProvider(); // Create a test key DFSTestUtil.createKey(TEST_KEY, cluster, conf); }
Example #11
Source File: TestChRootedFileSystem.java From big-c with Apache License 2.0 | 6 votes |
@Before public void setUp() throws Exception { // create the test root on local_fs Configuration conf = new Configuration(); fSysTarget = FileSystem.getLocal(conf); fileSystemTestHelper = new FileSystemTestHelper(); chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget); // In case previous test was killed before cleanup fSysTarget.delete(chrootedTo, true); fSysTarget.mkdirs(chrootedTo); // ChRoot to the root of the testDirectory fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf); }
Example #12
Source File: TestReservedRawPaths.java From hadoop with Apache License 2.0 | 6 votes |
@Before public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); File testRootDir = new File(testRoot).getAbsoluteFile(); final Path jksPath = new Path(testRootDir.toString(), "test.jks"); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri() ); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem()); fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); // Need to set the client's KeyProvider to the NN's for JKS, // else the updates do not get flushed properly fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem() .getProvider()); DFSTestUtil.createKey(TEST_KEY, cluster, conf); }
Example #13
Source File: TestFsShellPermission.java From hadoop with Apache License 2.0 | 5 votes |
private void createFiles(FileSystem fs, String topdir, FileEntry[] entries) throws IOException { for (FileEntry entry : entries) { String newPathStr = topdir + "/" + entry.getPath(); Path newPath = new Path(newPathStr); if (entry.isDirectory()) { fs.mkdirs(newPath); } else { FileSystemTestHelper.createFile(fs, newPath); } fs.setPermission(newPath, new FsPermission(entry.getPermission())); fs.setOwner(newPath, entry.getOwner(), entry.getGroup()); } }
Example #14
Source File: TestTransferFsImage.java From big-c with Apache License 2.0 | 5 votes |
/** * Test to verify the timeout of Image upload */ @Test(timeout = 10000) public void testImageUploadTimeout() throws Exception { Configuration conf = new HdfsConfiguration(); NNStorage mockStorage = Mockito.mock(NNStorage.class); HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs"); try { testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC, TestImageTransferServlet.class); testServer.start(); URL serverURL = HttpServerFunctionalTest.getServerURL(testServer); // set the timeout here, otherwise it will take default. TransferFsImage.timeout = 2000; File tmpDir = new File(new FileSystemTestHelper().getTestRootDir()); tmpDir.mkdirs(); File mockImageFile = File.createTempFile("image", "", tmpDir); FileOutputStream imageFile = new FileOutputStream(mockImageFile); imageFile.write("data".getBytes()); imageFile.close(); Mockito.when( mockStorage.findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong())).thenReturn(mockImageFile); Mockito.when(mockStorage.toColonSeparatedString()).thenReturn( "storage:info:string"); try { TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage, NameNodeFile.IMAGE, 1L); fail("TransferImage Should fail with timeout"); } catch (SocketTimeoutException e) { assertEquals("Upload should timeout", "Read timed out", e.getMessage()); } } finally { testServer.stop(); } }
Example #15
Source File: TestFsShellPermission.java From big-c with Apache License 2.0 | 5 votes |
private void createFiles(FileSystem fs, String topdir, FileEntry[] entries) throws IOException { for (FileEntry entry : entries) { String newPathStr = topdir + "/" + entry.getPath(); Path newPath = new Path(newPathStr); if (entry.isDirectory()) { fs.mkdirs(newPath); } else { FileSystemTestHelper.createFile(fs, newPath); } fs.setPermission(newPath, new FsPermission(entry.getPermission())); fs.setOwner(newPath, entry.getOwner(), entry.getGroup()); } }
Example #16
Source File: TestSaslDataTransfer.java From big-c with Apache License 2.0 | 5 votes |
/** * Tests DataTransferProtocol with the given client configuration. * * @param conf client configuration * @throws IOException if there is an I/O error */ private void doTest(HdfsConfiguration conf) throws IOException { fs = FileSystem.get(cluster.getURI(), conf); FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE); assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE), DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8")); BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE); assertNotNull(blockLocations); assertEquals(NUM_BLOCKS, blockLocations.length); for (BlockLocation blockLocation: blockLocations) { assertNotNull(blockLocation.getHosts()); assertEquals(3, blockLocation.getHosts().length); } }
Example #17
Source File: TestWebHDFSForHA.java From big-c with Apache License 2.0 | 5 votes |
@Test public void testSecureHAToken() throws IOException, InterruptedException { Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME); conf.setBoolean(DFSConfigKeys .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); MiniDFSCluster cluster = null; WebHdfsFileSystem fs = null; try { cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo) .numDataNodes(0).build(); HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME); cluster.waitActive(); fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf)); FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs); cluster.transitionToActive(0); Token<?> token = fs.getDelegationToken(null); cluster.shutdownNameNode(0); cluster.transitionToActive(1); token.renew(conf); token.cancel(conf); verify(fs).renewDelegationToken(token); verify(fs).cancelDelegationToken(token); } finally { IOUtils.cleanup(null, fs); if (cluster != null) { cluster.shutdown(); } } }
Example #18
Source File: TestEncryptionZonesWithHA.java From big-c with Apache License 2.0 | 5 votes |
@Before public void setupCluster() throws Exception { conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); HAUtil.setAllowStandbyReads(conf, true); fsHelper = new FileSystemTestHelper(); String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(testRootDir.toString(), "test.jks").toUri() ); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(1) .build(); cluster.waitActive(); cluster.transitionToActive(0); fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf); DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf); DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf); nn0 = cluster.getNameNode(0); nn1 = cluster.getNameNode(1); dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf); dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf); KeyProviderCryptoExtension nn0Provider = cluster.getNameNode(0).getNamesystem().getProvider(); fs.getClient().setKeyProvider(nn0Provider); }
Example #19
Source File: TestDelegationTokenFetcher.java From big-c with Apache License 2.0 | 5 votes |
@Before public void init() throws URISyntaxException, IOException { dfs = mock(DistributedFileSystem.class); conf = new Configuration(); uri = new URI("hdfs://" + SERVICE_VALUE); FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs); }
Example #20
Source File: TestViewFileSystemHdfs.java From big-c with Apache License 2.0 | 5 votes |
@Override @Before public void setUp() throws Exception { // create the test root on local_fs fsTarget = fHdfs; fsTarget2 = fHdfs2; targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2); super.setUp(); }
Example #21
Source File: TestFsVolumeList.java From big-c with Apache License 2.0 | 5 votes |
@Before public void setUp() { dataset = mock(FsDatasetImpl.class); baseDir = new FileSystemTestHelper().getTestRootDir(); Configuration blockScannerConf = new Configuration(); blockScannerConf.setInt(DFSConfigKeys. DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); blockScanner = new BlockScanner(null, blockScannerConf); }
Example #22
Source File: TestViewFileSystemDelegation.java From big-c with Apache License 2.0 | 5 votes |
static FakeFileSystem setupFileSystem(URI uri, Class clazz) throws Exception { String scheme = uri.getScheme(); conf.set("fs."+scheme+".impl", clazz.getName()); FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf); assertEquals(uri, fs.getUri()); Path targetPath = new FileSystemTestHelper().getAbsoluteTestRootPath(fs); ConfigUtil.addLink(conf, "/mounts/"+scheme, targetPath.toUri()); return fs; }
Example #23
Source File: TestChRootedFileSystem.java From big-c with Apache License 2.0 | 5 votes |
@Test public void testList() throws IOException { FileStatus fs = fSys.getFileStatus(new Path("/")); Assert.assertTrue(fs.isDirectory()); // should return the full path not the chrooted path Assert.assertEquals(fs.getPath(), chrootedTo); // list on Slash FileStatus[] dirPaths = fSys.listStatus(new Path("/")); Assert.assertEquals(0, dirPaths.length); fileSystemTestHelper.createFile(fSys, "/foo"); fileSystemTestHelper.createFile(fSys, "/bar"); fSys.mkdirs(new Path("/dirX")); fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY")); fSys.mkdirs(new Path("/dirX/dirXX")); dirPaths = fSys.listStatus(new Path("/")); Assert.assertEquals(4, dirPaths.length); // note 2 crc files // Note the the file status paths are the full paths on target fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isFile()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isFile()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isDirectory()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isDirectory()); }
Example #24
Source File: TestViewFileSystemDelegation.java From hadoop with Apache License 2.0 | 5 votes |
static FakeFileSystem setupFileSystem(URI uri, Class clazz) throws Exception { String scheme = uri.getScheme(); conf.set("fs."+scheme+".impl", clazz.getName()); FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf); assertEquals(uri, fs.getUri()); Path targetPath = new FileSystemTestHelper().getAbsoluteTestRootPath(fs); ConfigUtil.addLink(conf, "/mounts/"+scheme, targetPath.toUri()); return fs; }
Example #25
Source File: TestChRootedFileSystem.java From hadoop with Apache License 2.0 | 5 votes |
@Test public void testList() throws IOException { FileStatus fs = fSys.getFileStatus(new Path("/")); Assert.assertTrue(fs.isDirectory()); // should return the full path not the chrooted path Assert.assertEquals(fs.getPath(), chrootedTo); // list on Slash FileStatus[] dirPaths = fSys.listStatus(new Path("/")); Assert.assertEquals(0, dirPaths.length); fileSystemTestHelper.createFile(fSys, "/foo"); fileSystemTestHelper.createFile(fSys, "/bar"); fSys.mkdirs(new Path("/dirX")); fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY")); fSys.mkdirs(new Path("/dirX/dirXX")); dirPaths = fSys.listStatus(new Path("/")); Assert.assertEquals(4, dirPaths.length); // note 2 crc files // Note the the file status paths are the full paths on target fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isFile()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isFile()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isDirectory()); fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths); Assert.assertNotNull(fs); Assert.assertTrue(fs.isDirectory()); }
Example #26
Source File: TestChRootedFileSystem.java From hadoop with Apache License 2.0 | 5 votes |
/** * We would have liked renames across file system to fail but * Unfortunately there is not way to distinguish the two file systems * @throws IOException */ @Test public void testRenameAcrossFs() throws IOException { fSys.mkdirs(new Path("/newDir/dirFoo")); fSys.rename(new Path("/newDir/dirFoo"), new Path("file:///tmp/dirFooBar")); FileSystemTestHelper.isDir(fSys, new Path("/tmp/dirFooBar")); }
Example #27
Source File: TestViewFileSystemHdfs.java From hadoop with Apache License 2.0 | 5 votes |
@Override @Before public void setUp() throws Exception { // create the test root on local_fs fsTarget = fHdfs; fsTarget2 = fHdfs2; targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2); super.setUp(); }
Example #28
Source File: TestDelegationTokenFetcher.java From hadoop with Apache License 2.0 | 5 votes |
@Before public void init() throws URISyntaxException, IOException { dfs = mock(DistributedFileSystem.class); conf = new Configuration(); uri = new URI("hdfs://" + SERVICE_VALUE); FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs); }
Example #29
Source File: ViewFileSystemTestSetup.java From hadoop with Apache License 2.0 | 5 votes |
/** * * @param fsTarget - the target fs of the view fs. * @return return the ViewFS File context to be used for tests * @throws Exception */ static public FileSystem setupForViewFileSystem(Configuration conf, FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception { /** * create the test root on local_fs - the mount table will point here */ Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget); // In case previous test was killed before cleanup fsTarget.delete(targetOfTests, true); fsTarget.mkdirs(targetOfTests); // Set up viewfs link for test dir as described above String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri() .getPath(); linkUpFirstComponents(conf, testDir, fsTarget, "test dir"); // Set up viewfs link for home dir as described above setUpHomeDir(conf, fsTarget); // the test path may be relative to working dir - we need to make that work: // Set up viewfs link for wd as described above String wdDir = fsTarget.getWorkingDirectory().toUri().getPath(); linkUpFirstComponents(conf, wdDir, fsTarget, "working dir"); FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf); fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd. Log.info("Working dir is: " + fsView.getWorkingDirectory()); return fsView; }
Example #30
Source File: TestEncryptionZonesWithHA.java From hadoop with Apache License 2.0 | 5 votes |
@Before public void setupCluster() throws Exception { conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); HAUtil.setAllowStandbyReads(conf, true); fsHelper = new FileSystemTestHelper(); String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(testRootDir.toString(), "test.jks").toUri() ); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(1) .build(); cluster.waitActive(); cluster.transitionToActive(0); fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf); DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf); DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf); nn0 = cluster.getNameNode(0); nn1 = cluster.getNameNode(1); dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf); dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf); KeyProviderCryptoExtension nn0Provider = cluster.getNameNode(0).getNamesystem().getProvider(); fs.getClient().setKeyProvider(nn0Provider); }