Java Code Examples for org.apache.hadoop.fs.Path#toString()
The following examples show how to use org.apache.hadoop.fs.Path#toString(). Each example notes its original project, source file, and license.
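Before the examples, a minimal standalone sketch of what Path#toString() returns (hypothetical paths; assumes only hadoop-common on the classpath): a Path built from a plain string prints just the path portion, a Path built from a full URI keeps its scheme and authority, and either form round-trips through the Path(String) constructor.

import org.apache.hadoop.fs.Path;

public class PathToStringDemo {
  public static void main(String[] args) {
    // Built from a plain string: toString() is just the path portion.
    Path local = new Path("/tmp/data/part-0");
    System.out.println(local);                 // /tmp/data/part-0

    // Built from a full URI: toString() keeps scheme and authority.
    Path remote = new Path("hdfs://namenode:8020/tmp/data/part-0");
    System.out.println(remote);                // hdfs://namenode:8020/tmp/data/part-0

    // The string form round-trips through the Path(String) constructor.
    Path copy = new Path(remote.toString());
    System.out.println(copy.equals(remote));   // true
  }
}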
Example 1
Source File: TableSnapshotInputFormatImpl.java From hbase with Apache License 2.0
public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List<String> locations,
    Scan scan, Path restoreDir) {
  this.htd = htd;
  this.regionInfo = regionInfo;
  if (locations == null || locations.isEmpty()) {
    this.locations = new String[0];
  } else {
    this.locations = locations.toArray(new String[locations.size()]);
  }
  try {
    this.scan = scan != null ? TableMapReduceUtil.convertScanToString(scan) : "";
  } catch (IOException e) {
    LOG.warn("Failed to convert Scan to String", e);
  }
  this.restoreDir = restoreDir.toString();
}
Example 2
Source File: CephFileSystem.java From cephfs-hadoop with GNU Lesser General Public License v2.1
/**
 * Opens an FSDataOutputStream at the indicated Path with write-progress
 * reporting. Same as create(), except fails if parent directory doesn't
 * already exist.
 * @param path the file name to open
 * @param permission
 * @param overwrite if a file with this name already exists, then if true,
 * the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize
 * @param progress
 * @throws IOException
 * @see #setPermission(Path, FsPermission)
 * @deprecated API only for 0.20-append
 */
@Deprecated
public FSDataOutputStream createNonRecursive(Path path, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  path = makeAbsolute(path);
  Path parent = path.getParent();
  if (parent != null) {
    CephStat stat = new CephStat();
    ceph.lstat(parent, stat); // handles FileNotFoundException case
    if (stat.isFile()) {
      throw new FileAlreadyExistsException(parent.toString());
    }
  }
  return this.create(path, permission, overwrite, bufferSize, replication,
      blockSize, progress);
}
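As a quick illustration of the createNonRecursive contract, a hedged fragment (hypothetical paths; not every FileSystem implementation supports this deprecated call, so treat this as a sketch rather than portable code):

// Sketch: unlike create(), createNonRecursive() refuses to build the
// parent directory chain. Paths below are hypothetical.
FileSystem fs = FileSystem.get(new Configuration());
fs.mkdirs(new Path("/tmp/present"));
try (FSDataOutputStream out = fs.createNonRecursive(
    new Path("/tmp/present/file"), true, 4096, (short) 1, 128 * 1024 * 1024, null)) {
  out.writeBytes("ok"); // parent exists, so this succeeds
}
// fs.createNonRecursive(new Path("/tmp/absent/file"), ...) fails instead of
// silently creating /tmp/absent the way create() would.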
Example 3
Source File: BackupSystemTable.java From hbase with Apache License 2.0
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region,
    final byte[] family, final List<Pair<Path, Path>> pairs) {
  List<Put> puts = new ArrayList<>(pairs.size());
  for (Pair<Path, Path> pair : pairs) {
    Path path = pair.getSecond();
    String file = path.toString();
    int lastSlash = file.lastIndexOf("/");
    String filename = file.substring(lastSlash + 1);
    Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
        Bytes.toString(region), BLK_LD_DELIM, filename));
    put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
    put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
    put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
    put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
    puts.add(put);
    LOG.debug("writing raw bulk path " + file + " for " + table + " "
        + Bytes.toString(region));
  }
  return puts;
}
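A side note on the idiom above: taking the substring after the last '/' of path.toString() extracts the final path component, which is also what Path#getName() returns. A tiny sketch with a hypothetical path:

Path path = new Path("/backup/region1/cf/hfile-0001");
String file = path.toString();
String viaSubstring = file.substring(file.lastIndexOf("/") + 1); // "hfile-0001"
String viaGetName = path.getName();                              // "hfile-0001"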
Example 4
Source File: MbVectorTilesInputFormatTest.java From mrgeo with Apache License 2.0
@Test
public void getSplitsZoom14_3() throws Exception {
  Path dbPath = new Path(input, "AmbulatoryPt.mbtiles");
  MbVectorTilesSettings dbSettings = new MbVectorTilesSettings(dbPath.toString(),
      new String[] { "ambulatory" }, 14, 3, null);
  MbVectorTilesInputFormat ifmt = new MbVectorTilesInputFormat(dbSettings);
  List<InputSplit> splits = ifmt.getSplits(context);
  Assert.assertNotNull(splits);
  Assert.assertEquals(2, splits.size());
  Assert.assertEquals(4L, ifmt.getRecordCount(conf));
  int count = 0;
  for (InputSplit split : splits) {
    RecordReader<FeatureIdWritable, Geometry> reader =
        ifmt.createRecordReader(split, context);
    Assert.assertNotNull(reader);
    while (reader.nextKeyValue()) {
      count++;
    }
  }
  Assert.assertEquals(8, count);
}
Example 5
Source File: ThirdEyeJob.java From incubator-pinot with Apache License 2.0
@Override
Properties getJobProperties(Properties inputConfig, String root, String collection,
    DateTime minTime, DateTime maxTime, String inputPaths) throws Exception {
  Properties config = new Properties();
  Path aggOutputPath = new Path(getIndexDir(root, collection, minTime, maxTime)
      + File.separator + AGGREGATION.getName());
  FileSystem fs = FileSystem.get(new Configuration());
  if (fs.exists(aggOutputPath)) {
    inputPaths = aggOutputPath.toString();
  }
  config.setProperty(TopKPhaseConstants.TOPK_PHASE_INPUT_PATH.toString(), inputPaths);
  config.setProperty(TopKPhaseConstants.TOPK_PHASE_OUTPUT_PATH.toString(),
      getIndexDir(root, collection, minTime, maxTime) + File.separator + TOPK.getName());
  return config;
}
Example 6
Source File: Trash.java From incubator-gobblin with Apache License 2.0
protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation) throws IOException {
  if (fs.exists(trashLocation)) {
    if (!fs.isDirectory(trashLocation)) {
      throw new IOException(String.format("Trash location %s is not a directory.", trashLocation));
    }
    if (!fs.exists(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
      // If trash identifier file is not present, directory might have been created by user.
      // Add trash identifier file only if directory is empty.
      if (fs.listStatus(trashLocation).length > 0) {
        throw new IOException(String.format(
            "Trash directory %s exists, but it does not look like a trash directory. "
                + "File: %s missing and directory is not empty.",
            trashLocation, TRASH_IDENTIFIER_FILE));
      } else if (!fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
        throw new IOException(String.format("Failed to create file %s in existing trash directory %s.",
            TRASH_IDENTIFIER_FILE, trashLocation));
      }
    }
  } else if (!(safeFsMkdir(fs, trashLocation.getParent(), ALL_PERM)
      && safeFsMkdir(fs, trashLocation, PERM)
      && fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE)))) {
    // Failed to create directory or create trash identifier file.
    throw new IOException("Failed to create trash directory at " + trashLocation.toString());
  }
}
Example 7
Source File: Writer.java From neo4j-mazerunner with Apache License 2.0
public static void dispatchJob(GraphDatabaseService db, String type)
    throws IOException, URISyntaxException {
  // Export the subgraph to HDFS
  Path pt = exportSubgraphToHDFSParallel(db);

  // Serialize processor message
  ProcessorMessage message = new ProcessorMessage(pt.toString(), type, ProcessorMode.Unpartitioned);
  Gson gson = new Gson();
  String strMessage = gson.toJson(message);

  // Send message to the Spark graph processor
  Worker.sendMessage(strMessage);
}
Example 8
Source File: MapFileOutputFormat.java From big-c with Apache License 2.0
public RecordWriter<WritableComparable, Writable> getRecordWriter(FileSystem ignored,
    JobConf job, String name, Progressable progress) throws IOException {
  // get the path of the temporary output file
  Path file = FileOutputFormat.getTaskOutputPath(job, name);
  FileSystem fs = file.getFileSystem(job);
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(job)) {
    // find the kind of compression to do
    compressionType = SequenceFileOutputFormat.getOutputCompressionType(job);
    // find the right codec
    Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, DefaultCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job);
  }
  // ignore the progress parameter, since MapFile is local
  final MapFile.Writer out = new MapFile.Writer(job, fs, file.toString(),
      job.getOutputKeyClass().asSubclass(WritableComparable.class),
      job.getOutputValueClass().asSubclass(Writable.class),
      compressionType, codec, progress);

  return new RecordWriter<WritableComparable, Writable>() {
    public void write(WritableComparable key, Writable value) throws IOException {
      out.append(key, value);
    }

    public void close(Reporter reporter) throws IOException {
      out.close();
    }
  };
}
Example 9
Source File: TestMiniMRClasspath.java From big-c with Apache License 2.0
static void configureWordCount(FileSystem fs, JobConf conf, String input,
    int numMaps, int numReduces, Path inDir, Path outDir) throws IOException {
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes(input);
  file.close();
  FileSystem.setDefaultUri(conf, fs.getUri());
  conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  conf.set("mapred.mapper.class", "testjar.ClassWordCount$MapClass");
  conf.set("mapred.combine.class", "testjar.ClassWordCount$Reduce");
  conf.set("mapred.reducer.class", "testjar.ClassWordCount$Reduce");
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  // set the tests jar file
  conf.setJarByClass(TestMiniMRClasspath.class);
}
Example 10
Source File: HistoryViewer.java From RDFS with Apache License 2.0
public HistoryViewer(String outputDir, Configuration conf, boolean printAll)
    throws IOException {
  this.conf = conf;
  this.printAll = printAll;
  Path output = new Path(outputDir);
  historyLogDir = new Path(output, "_logs/history");
  try {
    fs = output.getFileSystem(this.conf);
    if (!fs.exists(output)) {
      throw new IOException("History directory " + historyLogDir.toString()
          + " does not exist");
    }
    Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir, jobLogFileFilter));
    if (jobFiles.length == 0) {
      throw new IOException("Not a valid history directory " + historyLogDir.toString());
    }
    jobLogFile = jobFiles[0].toString();
    String[] jobDetails =
        JobInfo.decodeJobHistoryFileName(jobFiles[0].getName()).split("_");
    trackerHostName = jobDetails[0];
    trackerStartTime = jobDetails[1];
    jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
    job = new JobHistory.JobInfo(jobId);
    DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs);
  } catch (Exception e) {
    throw new IOException("Not able to initialize History viewer", e);
  }
}
Example 11
Source File: TestMultipleOutputs.java From hadoop-gpu with Apache License 2.0
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path inDir = getDir(IN_DIR);
  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
}
Example 12
Source File: WindowsSecureContainerExecutor.java From hadoop with Apache License 2.0
@Override
protected String[] getRunCommand(String command, String groupId, String userName,
    Path pidFile, Configuration conf) {
  File f = new File(command);
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("getRunCommand: %s exists:%b", command, f.exists()));
  }
  return new String[] { Shell.WINUTILS, "task", "createAsUser", groupId,
      userName, pidFile.toString(), "cmd /c " + command };
}
Example 13
Source File: TestMiniMRChildTask.java From big-c with Apache License 2.0
private void configure(JobConf conf, Path inDir, Path outDir, String input,
    Class<? extends Mapper> map, Class<? extends Reducer> reduce) throws IOException {
  // set up the input file system and write input text.
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  { // write input into input file
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  // configure the mapred Job which creates a tempfile in map.
  conf.setJobName("testmap");
  conf.setMapperClass(map);
  conf.setReducerClass(reduce);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(0);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"))
      .toString().replace(' ', '+');
  conf.set("test.build.data", TEST_ROOT_DIR);
}
Example 14
Source File: MatrixReader.java From systemds with Apache License 2.0
protected static void checkValidInputFile(FileSystem fs, Path path) throws IOException {
  // check non-existing file
  if (!fs.exists(path)) {
    throw new IOException("File " + path.toString() + " does not exist on HDFS/LFS.");
  }
  // check for empty file
  if (HDFSTool.isFileEmpty(fs, path)) {
    throw new EOFException("Empty input file " + path.toString() + ".");
  }
}
Example 15
Source File: JobUtil.java From jumbune with GNU Lesser General Public License v3.0
/**
 * This method, when injected into a class, will modify the output path,
 * but only if the output goes to HDFS.
 *
 * @param job Job whose output path needs to be changed
 */
public static void modifyOutputPath(Job job) throws Exception {
  Path path = FileOutputFormat.getOutputPath(job);
  if (path == null) {
    throw new IllegalArgumentException("Job output path is null, expecting non-null path value");
  }
  StringBuilder out = new StringBuilder(path.toString());
  out.append(SEPARATOR_UNDERSCORE).append(System.currentTimeMillis());
  FileOutputFormat.setOutputPath(job, new Path(out.toString()));
}
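A hedged usage sketch of the method above (the job setup and output path are hypothetical, and SEPARATOR_UNDERSCORE is assumed to be "_"):

// Hypothetical driver code: suffix the configured output path with a
// timestamp so repeated runs do not collide on the same directory.
Job job = Job.getInstance(new Configuration(), "word-count");
FileOutputFormat.setOutputPath(job, new Path("/data/out"));
JobUtil.modifyOutputPath(job);
// FileOutputFormat.getOutputPath(job) is now something like /data/out_1712345678901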
Example 16
Source File: UserDefinedFunctionsIT.java From phoenix with Apache License 2.0
/**
 * Test creating functions using a dir other than hbase.dynamic.jars.dir
 * @throws Exception
 */
@Test
public void testCreateFunctionNonDynamicJarDir() throws Exception {
  Connection conn = driver.connect(url, EMPTY_PROPS);
  String tableName = "table" + name.getMethodName();
  conn.createStatement().execute("create table " + tableName
      + "(tenant_id varchar not null, k integer not null, "
      + "firstname varchar, lastname varchar constraint pk primary key(tenant_id,k)) MULTI_TENANT=true");
  String tenantId = "tenId" + name.getMethodName();
  Connection tenantConn =
      driver.connect(url + ";" + PhoenixRuntime.TENANT_ID_ATTRIB + "=" + tenantId, EMPTY_PROPS);
  Statement stmtTenant = tenantConn.createStatement();
  tenantConn.commit();
  compileTestClass(MY_REVERSE_CLASS_NAME, MY_REVERSE_PROGRAM, 8);
  Path destJarPathOnHDFS = copyJarsFromDynamicJarsDirToDummyHDFSDir("myjar8.jar");
  try {
    String sql = "create function myfunction(VARCHAR) returns VARCHAR as 'org.apache.phoenix.end2end."
        + MY_REVERSE_CLASS_NAME + "' using jar '" + destJarPathOnHDFS.toString() + "'";
    stmtTenant.execute(sql);
    ResultSet rs = stmtTenant.executeQuery("select myfunction(firstname) from " + tableName);
    fail("expecting java.lang.SecurityException");
  } catch (Exception e) {
    assertTrue(ExceptionUtils.getRootCause(e) instanceof SecurityException);
  } finally {
    stmtTenant.execute("drop function myfunction");
  }
}
Example 17
Source File: NativeS3FileSystem.java From hadoop-gpu with Apache License 2.0
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  if (!exists(f)) {
    throw new FileNotFoundException(f.toString());
  }
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeS3FsInputStream(store.retrieve(key), key), bufferSize));
}
Example 18
Source File: WindowsSecureContainerExecutor.java From hadoop with Apache License 2.0
@Override
public void startLocalizer(LocalizerStartContext ctx)
    throws IOException, InterruptedException {
  Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens();
  InetSocketAddress nmAddr = ctx.getNmAddr();
  String user = ctx.getUser();
  String appId = ctx.getAppId();
  String locId = ctx.getLocId();
  LocalDirsHandlerService dirsHandler = ctx.getDirsHandler();
  List<String> localDirs = dirsHandler.getLocalDirs();
  List<String> logDirs = dirsHandler.getLogDirs();

  Path classpathJarPrivateDir = dirsHandler.getLocalPathForWrite(
      ResourceLocalizationService.NM_PRIVATE_DIR);
  createUserLocalDirs(localDirs, user);
  createUserCacheDirs(localDirs, user);
  createAppDirs(localDirs, user, appId);
  createAppLogDirs(appId, logDirs, user);

  Path appStorageDir = getWorkingDir(localDirs, user, appId);

  String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
  Path tokenDst = new Path(appStorageDir, tokenFn);
  copyFile(nmPrivateContainerTokensPath, tokenDst, user);

  File cwdApp = new File(appStorageDir.toString());
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("cwdApp: %s", cwdApp));
  }

  List<String> command = new ArrayList<String>();
  // use same jvm as parent
  File jvm = new File(new File(System.getProperty("java.home"), "bin"), "java.exe");
  command.add(jvm.toString());

  Path cwdPath = new Path(cwdApp.getPath());

  // Build a temp classpath jar. See ContainerLaunch.sanitizeEnv().
  // Passing CLASSPATH explicitly is *way* too long for command line.
  String classPath = System.getProperty("java.class.path");
  Map<String, String> env = new HashMap<String, String>(System.getenv());
  String[] jarCp = FileUtil.createJarWithClassPath(classPath,
      classpathJarPrivateDir, cwdPath, env);
  String classPathJar = localizeClasspathJar(
      new Path(jarCp[0]), cwdPath, user).toString();
  command.add("-classpath");
  command.add(classPathJar + jarCp[1]);

  String javaLibPath = System.getProperty("java.library.path");
  if (javaLibPath != null) {
    command.add("-Djava.library.path=" + javaLibPath);
  }
  command.addAll(ContainerLocalizer.getJavaOpts(getConf()));
  ContainerLocalizer.buildMainArgs(command, user, appId, locId, nmAddr, localDirs);

  String cmdLine = StringUtils.join(command, " ");
  String localizerPid = String.format(LOCALIZER_PID_FORMAT, locId);
  WintuilsProcessStubExecutor stubExecutor = new WintuilsProcessStubExecutor(
      cwdApp.getAbsolutePath(), localizerPid, user, "nul:", cmdLine);
  try {
    stubExecutor.execute();
    stubExecutor.validateResult();
  } finally {
    stubExecutor.close();
    try {
      killContainer(localizerPid, Signal.KILL);
    } catch (Throwable e) {
      LOG.warn(String.format(
          "An exception occurred during the cleanup of localizer job %s:%n%s",
          localizerPid,
          org.apache.hadoop.util.StringUtils.stringifyException(e)));
    }
  }
}
Example 19
Source File: TestHarWithCombineFileInputFormat.java From RDFS with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  conf = new JobConf();
  dfscluster = new MiniDFSCluster(conf, 1, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(1, fs.getUri().toString(), 1);
  hdfsInputPath = new Path(fs.getHomeDirectory(), "test");
  archiveDir = new Path(fs.getHomeDirectory(), "test-archive");
  filea = new Path(hdfsInputPath, "a");
  fileb = new Path(hdfsInputPath, "b");

  // Create the following directory structure
  // ~/test/a
  // ~/test/b/
  // ~/test-archive/foo.har/a (in HAR)
  // ~/test-archive/foo.har/b (in HAR)
  fs.mkdirs(hdfsInputPath);
  FSDataOutputStream out = fs.create(filea);
  out.write("a".getBytes());
  out.close();
  out = fs.create(fileb);
  out.write("b".getBytes());
  out.close();

  HadoopArchives har = new HadoopArchives(conf);
  String archiveName = "foo.har";
  String[] args = new String[5];
  args[0] = "-archiveName";
  args[1] = "foo.har";
  args[2] = "-p";
  args[3] = hdfsInputPath.toString();
  args[4] = archiveDir.toString();
  int ret = ToolRunner.run(har, args);
  assertTrue("Failed to create HAR", ret == 0);
  archiveInputPath = new Path("har://" + archiveDir.toUri().getPath(), archiveName);
}
Example 20
Source File: HDFSUtil.java From gemfirexd-oss with Apache License 2.0
private static Path getBaseHoplogPath(Path hoplogPath) {
  String originalFilename = hoplogPath.toString();
  int tmpExtIndex = originalFilename.lastIndexOf(".tmp");
  String trimmedFilename = originalFilename.substring(0, tmpExtIndex);
  return new Path(trimmedFilename);
}
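Note on the helper above: it assumes the input actually carries a ".tmp" suffix. For a hypothetical input such as /store/bucket-0/file-17.hop.tmp it returns /store/bucket-0/file-17.hop; if no ".tmp" is present, lastIndexOf returns -1 and the substring call throws, so callers are expected to pass only temporary hoplog paths.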