Java Code Examples for org.apache.hadoop.fs.FileSystem#get()

The following examples show how to use org.apache.hadoop.fs.FileSystem#get(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
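As a quick orientation, here is a minimal, self-contained sketch (not taken from any project on this page; the namenode address and user name are placeholders) showing the three common overloads of FileSystem.get():

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FileSystemGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // 1. The default file system, as configured by fs.defaultFS.
    FileSystem defaultFs = FileSystem.get(conf);

    // 2. The file system selected by an explicit URI scheme and authority.
    FileSystem byUri = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);

    // 3. The file system resolved as a specific user.
    FileSystem asUser = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf, "alice");

    asUser.close();
    byUri.close();
    defaultFs.close();
  }
}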
Example 1
Source File: TestHttpFSWithKerberos.java    From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass)
  throws Exception {
  createHttpFSServer();
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", fileSystemClass.getName());
  conf.set("fs.hdfs.impl.disable.cache", "true");
  URI uri = new URI( "webhdfs://" +
                     TestJettyHelper.getJettyURL().toURI().getAuthority());
  FileSystem fs = FileSystem.get(uri, conf);
  Token<?> tokens[] = fs.addDelegationTokens("foo", null);
  fs.close();
  Assert.assertEquals(1, tokens.length);
  fs = FileSystem.get(uri, conf);
  ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
  fs.listStatus(new Path("/"));
  fs.close();
}
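A note on the fs.hdfs.impl.disable.cache setting above: FileSystem.get() normally caches instances per scheme, authority, and user, so repeated calls return the same object; the test disables the cache to get a fresh instance on the second get(). A minimal sketch of the default behavior:

Configuration conf = new Configuration();
FileSystem a = FileSystem.get(conf);
FileSystem b = FileSystem.get(conf);
// With caching enabled (the default), both calls return the same instance,
// so closing one closes it for every other holder as well.
System.out.println(a == b); // true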
 
Example 2
Source File: NativeAzureFileSystemBaseTest.java    From hadoop with Apache License 2.0
@Test
public void testCopyFromLocalFileSystem() throws Exception {
  Path localFilePath = new Path(System.getProperty("test.build.data",
      "azure_test"));
  FileSystem localFs = FileSystem.get(new Configuration());
  localFs.delete(localFilePath, true);
  try {
    writeString(localFs, localFilePath, "Testing");
    Path dstPath = new Path("copiedFromLocal");
    assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
        fs.getConf()));
    assertTrue(fs.exists(dstPath));
    assertEquals("Testing", readString(fs, dstPath));
    fs.delete(dstPath, true);
  } finally {
    localFs.delete(localFilePath, true);
  }
}
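Note that FileSystem.get(new Configuration()) resolves to the local file system only when fs.defaultFS points at file:/// (the Hadoop default). A sketch of the more explicit alternative:

// Request the local file system directly, independent of fs.defaultFS.
LocalFileSystem localFs = FileSystem.getLocal(new Configuration());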
 
Example 3
Source File: TestCodec.java    From big-c with Apache License 2.0
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
  // Close the reader to avoid leaking the underlying file handles.
  reader.close();
}
 
Example 4
Source File: HDFSStorageTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testCleanup() throws IOException
{
  RandomAccessFile r = new RandomAccessFile(testMeta.testFile, "r");
  r.seek(0);
  byte[] b = r.readLine().getBytes();
  storage.store(new Slice(b, 0, b.length));
  byte[] val = storage.store(new Slice(b, 0, b.length));
  storage.flush();
  storage.clean(val);
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  boolean exists = fs.exists(new Path(STORAGE_DIRECTORY + "/" + "0"));
  Assert.assertEquals("file should not exist", false, exists);
  r.close();
}
 
Example 5
Source File: ClientDistributedCacheManager.java    From big-c with Apache License 2.0
/**
 * Returns a boolean to denote whether a cache file is visible to all (public)
 * or not.
 * @param conf the configuration used to resolve the file system
 * @param uri the URI of the cache file to check
 * @param statCache a cache of FileStatus objects keyed by URI
 * @return true if the path in the uri is visible to all, false otherwise
 * @throws IOException
 */
static boolean isPublic(Configuration conf, URI uri,
    Map<URI, FileStatus> statCache) throws IOException {
  FileSystem fs = FileSystem.get(uri, conf);
  Path current = new Path(uri.getPath());
  //the leaf level file should be readable by others
  if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
    return false;
  }
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
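checkPermissionOfOther and ancestorsHaveExecutePermissions are private helpers of this class. A rough sketch of the kind of check the first one performs (hypothetical code, not the project's implementation):

// Does the "other" permission class on the path allow the requested action?
FileStatus status = fs.getFileStatus(current);
boolean allowed = status.getPermission().getOtherAction().implies(FsAction.READ);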
 
Example 6
Source File: TestCTASQuery.java    From incubator-tajo with Apache License 2.0
@Test
public final void testCtasWithoutTableDefinition() throws Exception {
  ResultSet res = executeQuery();

  res.close();
  CatalogService catalog = testBase.getTestingCluster().getMaster().getCatalog();
  TableDesc desc = catalog.getTableDesc("testCtasWithoutTableDefinition");
  assertTrue(catalog.existsTable("testCtasWithoutTableDefinition"));

  assertTrue(desc.getSchema().contains("testCtasWithoutTableDefinition.col1"));
  PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
  assertEquals(partitionDesc.getPartitionType(), CatalogProtos.PartitionType.COLUMN);
  assertEquals("key", partitionDesc.getExpressionSchema().getColumns().get(0).getSimpleName());

  FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
  Path path = desc.getPath();
  assertTrue(fs.isDirectory(path));
  assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
  assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
  assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
  assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
  assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
  assertEquals(5, desc.getStats().getNumRows().intValue());

  ResultSet res2 = executeFile("check1.sql");

  Map<Double, int []> resultRows1 = Maps.newHashMap();
  resultRows1.put(45.0d, new int[]{3, 2});
  resultRows1.put(38.0d, new int[]{2, 2});

  int i = 0;
  while(res2.next()) {
    assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
    assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
    i++;
  }
  res2.close();
  assertEquals(2, i);
}
 
Example 7
Source File: SpliceTableAdmin.java    From spliceengine with GNU Affero General Public License v3.0
public static void CHECK_INDEX(String schemaName, String tableName, String indexName, int level,
                               String outputFile, boolean fix, final ResultSet[] resultSet) throws Exception {
    FSDataOutputStream out = null;
    FileSystem fs = null;
    Map<String, List<String>> errors = null;

    try {
        LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
        Activation activation = lcc.getLastActivation();

        Configuration conf = (Configuration) SIDriver.driver().getConfiguration().getConfigSource().unwrapDelegate();
        String schema = EngineUtils.validateSchema(schemaName);
        String table = EngineUtils.validateTable(tableName);
        String index = EngineUtils.validateTable(indexName);

        fs = FileSystem.get(URI.create(outputFile), conf);
        out = fs.create(new Path(outputFile));

        errors = checkTable(schema, table, index, level, fix);
        resultSet[0] = processResults(errors, out, activation, outputFile);

    } finally {
        if (out != null) {
            out.close();
            if (errors == null || errors.size() == 0) {
                fs.delete(new Path(outputFile), true);
            }
        }

    }

}
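Because FileSystem.get(URI.create(outputFile), conf) selects the implementation from the URI scheme, the same procedure can write its report to HDFS or to local disk. A sketch with placeholder URIs:

FileSystem hdfs  = FileSystem.get(URI.create("hdfs://namenode:8020/reports/check.out"), conf);
FileSystem local = FileSystem.get(URI.create("file:///tmp/reports/check.out"), conf);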
 
Example 8
Source File: TestHttpFSPorts.java    From hadoop with Apache License 2.0
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Example 9
Source File: TestFileOutputFormat.java    From RDFS with Apache License 2.0
public void configure(JobConf conf) {
  try {
    FileSystem fs = FileSystem.get(conf);
    OutputStream os =
      fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
    os.write(1);
    os.close();
  }
  catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
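Note: JobConfigurable.configure(JobConf) does not declare any checked exceptions, which is why the IOException from the file system calls is wrapped in a RuntimeException here.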
 
Example 10
Source File: HiveOrcSerDeManager.java    From incubator-gobblin with Apache License 2.0
public HiveOrcSerDeManager(State props)
    throws IOException {
  super(props);
  this.fs = FileSystem.get(HadoopUtils.getConfFromState(props));

  List<String> extensions = props.getPropAsList(FILE_EXTENSIONS_KEY, DEFAULT_FILE_EXTENSIONS);
  this.fileExtensions = extensions.isEmpty() ? ImmutableList.of("") : extensions;

  this.ignoredFilePrefixes = props.getPropAsList(IGNORED_FILE_PREFIXES_KEY, DEFAULT_IGNORED_FILE_PREFIXES);
  this.checkOrcFormat = props.getPropAsBoolean(ENABLED_ORC_TYPE_CHECK, DEFAULT_ENABLED_ORC_TYPE_CHECK);
  this.metricContext = Instrumented.getMetricContext(props, HiveOrcSerDeManager.class);
  this.serDeWrapper = HiveSerDeWrapper.get(props.getProp(SERDE_TYPE_KEY, DEFAULT_SERDE_TYPE),
      Optional.of(props.getProp(INPUT_FORMAT_CLASS_KEY, DEFAULT_INPUT_FORMAT_CLASS)),
      Optional.of(props.getProp(OUTPUT_FORMAT_CLASS_KEY, DEFAULT_OUTPUT_FORMAT_CLASS)));
}
 
Example 11
Source File: BaseTestHttpFSWith.java    From hadoop with Apache License 2.0
private void testOpen() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  fs.close();
  fs = getHttpFSFileSystem();
  InputStream is = fs.open(new Path(path.toUri().getPath()));
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
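The pattern here: the file is written through the directly proxied file system, then reopened through the HttpFS gateway, verifying that both views expose the same contents.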
 
Example 12
Source File: SegmentReader.java    From nutch-htmlunit with Apache License 2.0
public SegmentReader(Configuration conf, boolean co, boolean fe, boolean ge, boolean pa,
        boolean pd, boolean pt) {
  super(conf);
  this.co = co;
  this.fe = fe;
  this.ge = ge;
  this.pa = pa;
  this.pd = pd;
  this.pt = pt;
  try {
    this.fs = FileSystem.get(getConf());
  } catch (IOException e) {
    LOG.error("IOException:", e);
  }
}
 
Example 13
Source File: FlinkYarnJobLauncher.java    From sylph with Apache License 2.0
private void cleanupStagingDir(Path uploadingDir)
{
    try {
        FileSystem hdfs = FileSystem.get(yarnClient.getConfig());
        if (hdfs.delete(uploadingDir, true)) {
            logger.info("Deleted staging directory {}", uploadingDir);
        }
    }
    catch (IOException e) {
        logger.warn("Failed to cleanup staging dir {}", uploadingDir, e);
    }
}
 
Example 14
Source File: LouvainMasterCompute.java    From distributed-graph-analytics with Apache License 2.0
private void writeFile(String path, String message) {
    Path pt = new Path(path);
    logger.debug("Writing file out to {}, message {}", path, message);
    try {
        FileSystem fs = FileSystem.get(new Configuration());
        BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, true)));
        br.write(message);
        br.close();
    } catch (IOException e) {
        e.printStackTrace();
        throw new IllegalStateException("Could not write to file: " + path);
    }
}
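As written, the writer is not closed if write() throws. A try-with-resources variant (a sketch, assuming Java 7+ and an explicit charset) closes the stream on all paths:

try (BufferedWriter br = new BufferedWriter(
    new OutputStreamWriter(fs.create(pt, true), StandardCharsets.UTF_8))) {
  br.write(message);
}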
 
Example 15
Source File: IntegrationTestMobCompaction.java    From hbase with Apache License 2.0
private  long getNumberOfMobFiles(Configuration conf, TableName tableName, String family)
    throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path dir = MobUtils.getMobFamilyPath(conf, tableName, family);
  FileStatus[] stat = fs.listStatus(dir);
  for (FileStatus st : stat) {
    LOG.debug("MOB Directory content: {}", st.getPath());
  }
  LOG.debug("MOB Directory content total files: {}", stat.length);

  return stat.length;
}
 
Example 16
Source File: AvroStockFileWrite.java    From hiped2 with Apache License 2.0
/**
 * Write the sequence file.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {

  Cli cli = Cli.builder().setArgs(args).addOptions(CliCommonOpts.IOFileOpts.values()).build();
  int result = cli.runCmd();

  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(CliCommonOpts.IOFileOpts.INPUT));
  Path outputPath = new Path(cli.getArgValueAsString(CliCommonOpts.IOFileOpts.OUTPUT));

  Configuration conf = super.getConf();

  FileSystem hdfs = FileSystem.get(conf);

  OutputStream os = hdfs.create(outputPath);
  writeToAvro(inputFile, os);

  return 0;
}
 
Example 17
Source File: FsRenameCommitStep.java    From incubator-gobblin with Apache License 2.0
private FileSystem getFileSystem(String fsUri) throws IOException {
  return FileSystem.get(URI.create(fsUri), HadoopUtils.getConfFromState(this.props));
}
 
Example 18
Source File: DefaultRemoteDirectoryManager.java    From submarine with Apache License 2.0
@Override
public FileSystem getFileSystemByUri(String uri) throws IOException {
  return FileSystem.get(URI.create(uri), conf);
}
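Keep in mind that FileSystem.get() may return a shared, cached instance, so closing it affects every other holder of that instance. When a caller needs a private instance it can close safely, FileSystem.newInstance() bypasses the cache (a sketch):

FileSystem privateFs = FileSystem.newInstance(URI.create(uri), conf);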
 
Example 19
Source File: GetMergeFiles.java    From data-polygamy with BSD 3-Clause "New" or "Revised" License
public static void main(String[] args) throws IllegalArgumentException, IOException, URISyntaxException {
    String fromDirectory = args[0];
    String toEventsDirectory = args[1];
    String toOutliersDirectory = args[2];
    String metadataFile = args[3];
    
    // Detecting datasets.
    
    HashSet<String> datasets = new HashSet<String>();
    
    FileReader fileReader = new FileReader(metadataFile);
    BufferedReader bufferedReader = new BufferedReader(fileReader);

    String line;
    while((line = bufferedReader.readLine()) != null) {
        String[] parts = line.split(",");
        datasets.add(parts[0]);
    }    
    bufferedReader.close();
    
    // Downloading relationships.
    
    String relationshipPatternStr = "([a-zA-Z0-9]{4}\\-[a-zA-Z0-9]{4})\\-([a-zA-Z0-9]{4}\\-[a-zA-Z0-9]{4})";
    Pattern relationshipPattern = Pattern.compile(relationshipPatternStr);
    
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FileSystem localFS = FileSystem.getLocal(conf);

    for (FileStatus status : fs.listStatus(new Path(fs.getHomeDirectory() + "/" + fromDirectory))) {
        if (!status.isDirectory()) {
            continue;
        }
        Path file = status.getPath();
        
        Matcher m = relationshipPattern.matcher(file.getName());
        if (!m.find()) continue;
        
        String ds1 = m.group(1);
        String ds2 = m.group(2);
        
        if (!datasets.contains(ds1)) continue;
        if (!datasets.contains(ds2)) continue;
        
        for (FileStatus statusDir : fs.listStatus(file)) {
            if (!statusDir.isDirectory()) {
                continue;
            }
            
            Path fromPath = statusDir.getPath();
            String toPathStr;
            if (fromPath.getName().contains("events")) {
                toPathStr = toEventsDirectory + "/" +
                        fromPath.getParent().getName() + "-" + fromPath.getName();
            } else {
                toPathStr = toOutliersDirectory + "/" +
                        fromPath.getParent().getName() + "-" + fromPath.getName();
            }
            Path toPath = new Path(toPathStr);
            
            System.out.println("Copying:");
            System.out.println("  From: " + fromPath.toString());
            System.out.println("  To: " + toPath.toString());
            
            FileUtil.copyMerge(
                    fs, // HDFS File System
                    fromPath, // HDFS path
                    localFS, // Local File System
                    toPath, // Local Path
                    false, // Do not delete HDFS path
                    conf, // Configuration
                    null);
        }
    }
}
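Note: FileUtil.copyMerge was removed in Hadoop 3.0, so this example compiles only against Hadoop 2.x and earlier; on newer versions the merge loop has to be written by hand.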
 
Example 20
Source File: DistributedCacheEmulator.java    From big-c with Apache License 2.0
/**
 * Create distributed cache directory where distributed cache files will be
 * created by the MapReduce job {@link GenerateDistCacheData#JOB_NAME}.
 * @throws IOException
 */
private void createDistCacheDirectory() throws IOException {
  FileSystem fs = FileSystem.get(conf);
  FileSystem.mkdirs(fs, distCachePath, new FsPermission((short) 0777));
}
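The static helper FileSystem.mkdirs(fs, path, permission) creates the directory and then explicitly sets the given permission on it, so the 0777 here is applied regardless of the process umask.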