Java Code Examples for org.apache.hadoop.fs.Path#SEPARATOR
The following examples show how to use org.apache.hadoop.fs.Path#SEPARATOR. Each example comes from an open-source project; the source file, project, and license are noted above each listing.
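Before turning to the project examples, here is a minimal self-contained sketch of what Path.SEPARATOR provides (the class name and paths below are illustrative, not taken from any project on this page). Path.SEPARATOR is the constant string "/" that Hadoop uses between path components, independent of the platform's local file separator; Path.SEPARATOR_CHAR is the same value as a char.

import org.apache.hadoop.fs.Path;

public class PathSeparatorSketch {
  public static void main(String[] args) {
    // Build a path string by joining components with Path.SEPARATOR,
    // the pattern used throughout the examples below.
    String staging = "/tmp" + Path.SEPARATOR + "staging" + Path.SEPARATOR + "app_0001";
    System.out.println(staging); // /tmp/staging/app_0001

    // The two-argument Path constructor inserts the separator itself,
    // so explicit concatenation is mainly useful when assembling raw strings.
    Path same = new Path(new Path("/tmp", "staging"), "app_0001");
    System.out.println(same); // /tmp/staging/app_0001
  }
}

Most of the examples below use the first pattern, concatenating Path.SEPARATOR into a String before wrapping the result in a Path or File.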
Example 1
Source File: TestTezCommonUtils.java from tez with Apache License 2.0
@Test(timeout = 5000)
public void testTezRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path confStageDir = TezCommonUtils.getRecoveryPath(stageDir, conf);
  String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR
      + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR + strAppId
      + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME;
  Assert.assertEquals(confStageDir.toString(), expectedDir);
}
Example 2
Source File: FSStorageAgent.java from attic-apex-core with Apache License 2.0
@Override
public Object load(int operatorId, long windowId) throws IOException {
  Path lPath = new Path(path + Path.SEPARATOR + String.valueOf(operatorId)
      + Path.SEPARATOR + Long.toHexString(windowId));
  logger.debug("Loading: {}", lPath);
  FSDataInputStream stream = fileContext.open(lPath);
  try {
    return retrieve(stream);
  } finally {
    stream.close();
  }
}
Example 3
Source File: NamenodeFsck.java from RDFS with Apache License 2.0
private void listCorruptOpenFiles() throws IOException {
  int matchedCorruptFilesCount = 0;
  // directory representation of path
  String pathdir = path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR;
  FileStatus pathFileStatus = nn.getNamesystem().getFileInfo(pathdir);
  List<FileStatus> corruptFileStatusList = new ArrayList<FileStatus>();
  checkForCorruptOpenFiles(pathFileStatus, corruptFileStatusList);

  for (FileStatus fileStatus : corruptFileStatusList) {
    String currentPath = fileStatus.getPath().toString();
    if (currentPath.startsWith(pathdir) || currentPath.equals(path)) {
      matchedCorruptFilesCount++;
      // print the header before listing first item
      if (matchedCorruptFilesCount == 1) {
        out.println("Here are a few files that may be corrupted:");
        out.println("===========================================");
      }
      out.println(currentPath);
    }
  }

  out.println();
  out.println(buildSummaryResultForListCorruptFiles(matchedCorruptFilesCount, path));
}
Example 4
Source File: FSDirRenameOp.java from big-c with Apache License 2.0
/**
 * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
 * boolean, Options.Rename...)}
 */
@Deprecated
@SuppressWarnings("deprecation")
private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
    String src, String dst, boolean logRetryCache) throws IOException {
  // Rename does not operate on link targets
  // Do not resolveLink when checking permissions of src and dst
  // Check write access to parent of src
  final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);

  // Note: We should not be doing this. This is move() not renameTo().
  final String actualDst = fsd.isDir(dst) ?
      dst + Path.SEPARATOR + new Path(src).getName() : dst;
  final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false);
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
        false);
    // Check write access to ancestor of dst
    fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
        false);
  }

  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  final long mtime = Time.now();
  boolean stat = false;
  fsd.writeLock();
  try {
    stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime);
  } finally {
    fsd.writeUnlock();
  }
  if (stat) {
    fsd.getEditLog().logRename(src, actualDst, mtime, logRetryCache);
    return true;
  }
  return false;
}
Example 5
Source File: TestDistCpSync.java from big-c with Apache License 2.0
/**
 * Test the sync returns false in the following scenarios:
 * 1. the source/target dir are not snapshottable dir
 * 2. the source/target does not have the given snapshots
 * 3. changes have been made in target
 */
@Test
public void testFallback() throws Exception {
  // the source/target dir are not snapshottable dir
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  final Path spath = new Path(source,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  // the source/target does not have the given snapshots
  dfs.allowSnapshot(source);
  dfs.allowSnapshot(target);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.createSnapshot(source, "s1");
  dfs.createSnapshot(source, "s2");
  dfs.createSnapshot(target, "s1");
  Assert.assertTrue(DistCpSync.sync(options, conf));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  // changes have been made in target
  final Path subTarget = new Path(target, "sub");
  dfs.mkdirs(subTarget);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.delete(subTarget, true);
  Assert.assertTrue(DistCpSync.sync(options, conf));
}
Example 6
Source File: LocalIndexIT.java from phoenix with Apache License 2.0
private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion,
    RegionInfo toRegion, boolean move) throws IOException {
  Path root = FSUtils.getRootDir(conf);

  Path secondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable())
      + Path.SEPARATOR + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  Path hfilePath = FSUtils.getCurrentFileSystem(conf)
      .listFiles(secondRegion, true).next().getPath();
  Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable())
      + Path.SEPARATOR + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
  assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem,
      firstRegionPath, move, conf));
}
Example 7
Source File: FSRecoveryHandler.java from Bats with Apache License 2.0
@Override
public void save(Object state) throws IOException {
  if (fs.exists(snapshotBackupPath)) {
    throw new IllegalStateException("Found previous backup " + snapshotBackupPath);
  }

  if (fs.exists(snapshotPath)) {
    LOG.debug("Backup {} to {}", snapshotPath, snapshotBackupPath);
    fs.rename(snapshotPath, snapshotBackupPath);
  }

  LOG.debug("Writing checkpoint to {}", snapshotPath);
  try (FSDataOutputStream fsOutputStream = fs.create(snapshotPath);
      ObjectOutputStream oos = new ObjectOutputStream(fsOutputStream)) {
    oos.writeObject(state);
  }

  // remove snapshot backup
  if (fs.exists(snapshotBackupPath) && !fs.delete(snapshotBackupPath, false)) {
    throw new IOException("Failed to remove " + snapshotBackupPath);
  }

  // remove log backup
  Path logBackup = new Path(basedir + Path.SEPARATOR + FILE_LOG_BACKUP);
  if (fs.exists(logBackup) && !fs.delete(logBackup, false)) {
    throw new IOException("Failed to remove " + logBackup);
  }
}
Example 8
Source File: FSDirRenameOp.java from hadoop with Apache License 2.0
/**
 * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
 * boolean, Options.Rename...)}
 */
@Deprecated
@SuppressWarnings("deprecation")
private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
    String src, String dst, boolean logRetryCache) throws IOException {
  // Rename does not operate on link targets
  // Do not resolveLink when checking permissions of src and dst
  // Check write access to parent of src
  final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);

  // Note: We should not be doing this. This is move() not renameTo().
  final String actualDst = fsd.isDir(dst) ?
      dst + Path.SEPARATOR + new Path(src).getName() : dst;
  final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false);
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
        false);
    // Check write access to ancestor of dst
    fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
        false);
  }

  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  final long mtime = Time.now();
  boolean stat = false;
  fsd.writeLock();
  try {
    stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime);
  } finally {
    fsd.writeUnlock();
  }
  if (stat) {
    fsd.getEditLog().logRename(src, actualDst, mtime, logRetryCache);
    return true;
  }
  return false;
}
Example 9
Source File: TestHadoopArchives.java from big-c with Apache License 2.0
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();

  // make path to copy the file to:
  final String tmpDir =
      System.getProperty("test.build.data", "build/test/data") + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));

  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());

    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");
    // copy the Har file to a local file system:
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
    FileStatus straus = localFs.getFileStatus(targetPath);
    // the file should contain just 1 character:
    assertEquals(1, straus.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);
  }
}
Example 10
Source File: FSRecoveryHandler.java from attic-apex-core with Apache License 2.0
@Override
public void save(Object state) throws IOException {
  if (fs.exists(snapshotBackupPath)) {
    throw new IllegalStateException("Found previous backup " + snapshotBackupPath);
  }

  if (fs.exists(snapshotPath)) {
    LOG.debug("Backup {} to {}", snapshotPath, snapshotBackupPath);
    fs.rename(snapshotPath, snapshotBackupPath);
  }

  LOG.debug("Writing checkpoint to {}", snapshotPath);
  try (FSDataOutputStream fsOutputStream = fs.create(snapshotPath);
      ObjectOutputStream oos = new ObjectOutputStream(fsOutputStream)) {
    oos.writeObject(state);
  }

  // remove snapshot backup
  if (fs.exists(snapshotBackupPath) && !fs.delete(snapshotBackupPath, false)) {
    throw new IOException("Failed to remove " + snapshotBackupPath);
  }

  // remove log backup
  Path logBackup = new Path(basedir + Path.SEPARATOR + FILE_LOG_BACKUP);
  if (fs.exists(logBackup) && !fs.delete(logBackup, false)) {
    throw new IOException("Failed to remove " + logBackup);
  }
}
Example 11
Source File: TestHistoryParser.java from tez with Apache License 2.0
private DagInfo getDagInfo(String dagId) throws TezException {
  // Parse downloaded contents
  File downloadedFile = new File(DOWNLOAD_DIR + Path.SEPARATOR + dagId + ".zip");
  ATSFileParser parser = new ATSFileParser(Arrays.asList(downloadedFile));
  DagInfo dagInfo = parser.getDAGData(dagId);
  assertTrue(dagInfo.getDagId().equals(dagId));
  return dagInfo;
}
Example 12
Source File: FileSplitterInputTest.java from attic-apex-malhar with Apache License 2.0
@Test
public void testScannerFilterForDuplicates() throws InterruptedException {
  String filePath = testMeta.dataDirectory + Path.SEPARATOR + "file0.txt";
  testMeta.scanner = new MockScanner();
  testMeta.fileSplitterInput.setScanner(testMeta.scanner);
  testMeta.fileSplitterInput.getScanner().setScanIntervalMillis(500);
  testMeta.fileSplitterInput.getScanner().setFilePatternRegularExp(".*[.]txt");
  testMeta.fileSplitterInput.getScanner().setFiles(filePath);

  testMeta.fileSplitterInput.setup(testMeta.context);
  testMeta.fileSplitterInput.beginWindow(1);
  testMeta.scanner.semaphore.acquire();
  testMeta.fileSplitterInput.emitTuples();
  testMeta.fileSplitterInput.endWindow();

  testMeta.fileSplitterInput.beginWindow(2);
  testMeta.fileSplitterInput.emitTuples();
  testMeta.fileSplitterInput.endWindow();

  Assert.assertEquals("File metadata", 1, testMeta.fileMetadataSink.collectedTuples.size());
  for (Object fileMetadata : testMeta.fileMetadataSink.collectedTuples) {
    FileSplitterInput.FileMetadata metadata = (FileSplitterInput.FileMetadata)fileMetadata;
    Assert.assertTrue("path: " + metadata.getFilePath(),
        testMeta.filePaths.contains(metadata.getFilePath()));
    Assert.assertNotNull("name: ", metadata.getFileName());
  }

  testMeta.fileMetadataSink.collectedTuples.clear();
  testMeta.fileSplitterInput.teardown();
}
Example 13
Source File: Fetcher.java from tez with Apache License 2.0
private final String getMapOutputFile(String pathComponent) {
  String outputPath = Constants.TEZ_RUNTIME_TASK_OUTPUT_DIR + Path.SEPARATOR
      + pathComponent + Path.SEPARATOR
      + Constants.TEZ_RUNTIME_TASK_OUTPUT_FILENAME_STRING;

  if (ShuffleUtils.isTezShuffleHandler(conf)) {
    return Constants.DAG_PREFIX + this.dagIdentifier + Path.SEPARATOR + outputPath;
  }
  return outputPath;
}
Example 14
Source File: PathData.java from big-c with Apache License 2.0
/**
 * Given a child of this directory, use the directory's path and the child's
 * basename to construct the string to the child. This preserves relative
 * paths since Path will fully qualify.
 * @param childPath a path contained within this directory
 * @return String of the path relative to this directory
 */
private String getStringForChildPath(Path childPath) {
  String basename = childPath.getName();
  if (Path.CUR_DIR.equals(toString())) {
    return basename;
  }
  // check getPath() so scheme slashes aren't considered part of the path
  String separator = uri.getPath().endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR;
  return uriToString(uri, inferredSchemeFromPath) + separator + basename;
}
Example 15
Source File: TestHMobStore.java from hbase with Apache License 2.0
@Test
public void testCommitFile() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  init(name.getMethodName(), conf, true);
  String targetPathName = MobUtils.formatDate(new Date());
  Path targetPath =
      new Path(store.getPath(), (targetPathName + Path.SEPARATOR + mobFilePath.getName()));
  fs.delete(targetPath, true);
  Assert.assertFalse(fs.exists(targetPath));
  // commit file
  store.commitFile(mobFilePath, targetPath);
  Assert.assertTrue(fs.exists(targetPath));
}
Example 16
Source File: TaskTracker.java from hadoop-gpu with Apache License 2.0
static String getPidFile(String jobid, String taskid, boolean isCleanup) {
  return getLocalTaskDir(jobid, taskid, isCleanup) + Path.SEPARATOR + PID;
}
Example 17
Source File: TezRuntimeChildJVM.java from incubator-tez with Apache License 2.0
private static String getTaskLogFile(LogName filter) {
  return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR + filter.toString();
}
Example 18
Source File: TestHadoopArchives.java from big-c with Apache License 2.0
@Test
public void testReadFileContent() throws Exception {
  fileList.add(createFile(inputPath, fs, "c c"));
  final Path sub1 = new Path(inputPath, "sub 1");
  fs.mkdirs(sub1);
  fileList.add(createFile(inputPath, fs, sub1.getName(), "file x y z"));
  fileList.add(createFile(inputPath, fs, sub1.getName(), "file"));
  fileList.add(createFile(inputPath, fs, sub1.getName(), "x"));
  fileList.add(createFile(inputPath, fs, sub1.getName(), "y"));
  fileList.add(createFile(inputPath, fs, sub1.getName(), "z"));
  final Path sub2 = new Path(inputPath, "sub 1 with suffix");
  fs.mkdirs(sub2);
  fileList.add(createFile(inputPath, fs, sub2.getName(), "z"));
  // Generate a big binary file content:
  final byte[] binContent = prepareBin();
  fileList.add(createFile(inputPath, fs, binContent, sub2.getName(), "bin"));
  fileList.add(createFile(inputPath, fs, new byte[0], sub2.getName(), "zero-length"));

  final String fullHarPathStr = makeArchive();

  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());

    // now read the file content and compare it against the expected:
    int readFileCount = 0;
    for (final String pathStr0 : fileList) {
      final Path path = new Path(fullHarPathStr + Path.SEPARATOR + pathStr0);
      final String baseName = path.getName();
      final FileStatus status = harFileSystem.getFileStatus(path);
      if (status.isFile()) {
        // read the file:
        final byte[] actualContentSimple = readAllSimple(
            harFileSystem.open(path), true);

        final byte[] actualContentBuffer = readAllWithBuffer(
            harFileSystem.open(path), true);
        assertArrayEquals(actualContentSimple, actualContentBuffer);

        final byte[] actualContentFully = readAllWithReadFully(
            actualContentSimple.length, harFileSystem.open(path), true);
        assertArrayEquals(actualContentSimple, actualContentFully);

        final byte[] actualContentSeek = readAllWithSeek(
            actualContentSimple.length, harFileSystem.open(path), true);
        assertArrayEquals(actualContentSimple, actualContentSeek);

        final byte[] actualContentRead4 = readAllWithRead4(
            harFileSystem.open(path), true);
        assertArrayEquals(actualContentSimple, actualContentRead4);

        final byte[] actualContentSkip = readAllWithSkip(
            actualContentSimple.length,
            harFileSystem.open(path),
            harFileSystem.open(path),
            true);
        assertArrayEquals(actualContentSimple, actualContentSkip);

        if ("bin".equals(baseName)) {
          assertArrayEquals(binContent, actualContentSimple);
        } else if ("zero-length".equals(baseName)) {
          assertEquals(0, actualContentSimple.length);
        } else {
          String actual = new String(actualContentSimple, "UTF-8");
          assertEquals(baseName, actual);
        }
        readFileCount++;
      }
    }
    assertEquals(fileList.size(), readFileCount);
  } finally {
    harFileSystem.close();
  }
}
Example 19
Source File: TaskTracker.java from RDFS with Apache License 2.0
static String getCacheSubdir() {
  return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.CACHEDIR;
}
Example 20
Source File: MRApps.java from hadoop with Apache License 2.0
public static Path getEndJobCommitSuccessFile(Configuration conf, String user,
    JobId jobId) {
  Path endCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
      jobId.toString() + Path.SEPARATOR + "COMMIT_SUCCESS");
  return endCommitFile;
}