Java Code Examples for org.apache.hadoop.fs.FileSystem.get()

The following are Java code examples showing how to use the get() method of the org.apache.hadoop.fs.FileSystem class.
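For orientation before the examples, FileSystem.get() has three commonly used overloads. The sketch below is illustrative only; the hdfs://namenode:8020 URI and the user name "alice" are placeholder assumptions, not values taken from the examples that follow.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FileSystemGetOverloads {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // 1. Default file system, as configured by fs.defaultFS
    //    (the local file system when nothing is configured).
    FileSystem defaultFs = FileSystem.get(conf);

    // 2. File system selected by the URI's scheme and authority
    //    (placeholder address; requires a reachable NameNode).
    FileSystem byUri = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);

    // 3. Same, but performing all operations as an explicit user.
    FileSystem asUser = FileSystem.get(URI.create("hdfs://namenode:8020"), conf, "alice");

    System.out.println(defaultFs.getUri());
  }
}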
Example 1
Project: hadoop   File: TestWebHdfsWithMultipleNameNodes.java
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();
  
  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
 
Example 2
Project: hadoop   File: TestHftpFileSystem.java
@Test
public void testHftpCustomDefaultPorts() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);

  URI uri = URI.create("hftp://localhost");
  HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());

  assertEquals(uri, fs.getUri());

  // HFTP uses http to get the token so canonical service name should
  // return the http port.
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Example 3
Project: hadoop-oss   File: TestViewFileSystemDelegation.java
private static FileSystem setupMockFileSystem(Configuration conf, URI uri)
    throws Exception {
  String scheme = uri.getScheme();
  conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName());
  FileSystem fs = FileSystem.get(uri, conf);
  ConfigUtil.addLink(conf, "/mounts/" + scheme, uri);
  return ((MockFileSystem)fs).getRawFileSystem();
}
 
Example 4
Project: hadoop   File: TestHttpFSPorts.java
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
 
Example 5
Project: ditb   File: SnapshotInfo.java
/**
 * Returns the list of available snapshots in the specified location
 * @param conf the {@link Configuration} to use
 * @return the list of snapshots
 */
public static List<SnapshotDescription> getSnapshotList(final Configuration conf)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  FileStatus[] snapshots = fs.listStatus(snapshotDir,
    new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
  List<SnapshotDescription> snapshotLists =
    new ArrayList<SnapshotDescription>(snapshots.length);
  for (FileStatus snapshotDirStat: snapshots) {
    snapshotLists.add(SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()));
  }
  return snapshotLists;
}
 
Example 6
Project: hadoop   File: TestHftpFileSystem.java
@Test
public void testTimeout() throws IOException {
  Configuration conf = new Configuration();
  URI uri = URI.create("hftp://localhost");
  HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
  URLConnection conn = fs.connectionFactory.openConnection(new URL(
      "http://localhost"));
  assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
      conn.getConnectTimeout());
  assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
      conn.getReadTimeout());
}
 
Example 7
Project: hadoop   File: TestDistCpUtils.java
@Test
public void testPreserveNothingOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(dstStatus.getAccessTime() == 100);
  Assert.assertTrue(dstStatus.getModificationTime() == 100);
  Assert.assertTrue(dstStatus.getReplication() == 0);
}
 
Example 8
Project: ditb   File: TestHFileInlineToRootChunkConversion.java
@Test
public void testWriteHFile() throws Exception {
  Path hfPath = new Path(testUtil.getDataTestDir(),
      TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
  int maxChunkSize = 1024;
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
  HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
  HFileWriterV2 hfw =
      (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
          .withFileContext(context)
          .withPath(fs, hfPath).create();
  List<byte[]> keys = new ArrayList<byte[]>();
  StringBuilder sb = new StringBuilder();

  for (int i = 0; i < 4; ++i) {
    sb.append("key" + String.format("%05d", i));
    sb.append("_");
    for (int j = 0; j < 100; ++j) {
      sb.append('0' + j); // note: '0' + j is int arithmetic, so this appends the numbers 48..147, not digit characters
    }
    String keyStr = sb.toString();
    sb.setLength(0);

    byte[] k = Bytes.toBytes(keyStr);
    keys.add(k);
    byte[] v = Bytes.toBytes("value" + i);
    hfw.append(CellUtil.createCell(k, v));
  }
  hfw.close();

  HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf, conf);
  // Scanner doesn't do Cells yet.  Fix.
  HFileScanner scanner = reader.getScanner(true, true);
  for (int i = 0; i < keys.size(); ++i) {
    scanner.seekTo(CellUtil.createCell(keys.get(i)));
  }
  reader.close();
}
 
Example 9
Project: hadoop   File: TestViewFileSystemDelegationTokenSupport.java
@BeforeClass
public static void setup() throws Exception {
  conf = ViewFileSystemTestSetup.createConfig();
  fs1 = setupFileSystem(new URI("fs1:///"), FakeFileSystem.class);
  fs2 = setupFileSystem(new URI("fs2:///"), FakeFileSystem.class);
  viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
Example 10
Project: hadoop   File: TestHftpFileSystem.java
@Test
public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
  Configuration conf = new Configuration();
  URI uri = URI.create("hsftp://localhost:123");
  HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);

  assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
      fs.getDefaultPort());

  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Example 11
Project: HadoopGuides   File: FileCopyWithProgress.java
public static void main(String[] args) throws IOException {
    final String localSrc = "/tmp/log/bigdata.pdf";
    final String hdfsUri = "hdfs://master:8020/test/bigdata.pdf";
    InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf);
    OutputStream out = fs.create(new Path(hdfsUri), new Progressable() {
        // progress() is only invoked when the underlying Hadoop file system
        // is HDFS; local, S3, and FTP file systems never call it.
        @Override
        public void progress() {
            System.out.print(">");
        }
    });
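    // The final "true" tells copyBytes to close both streams once the copy completes.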
    IOUtils.copyBytes(in, out, 4096, true);
}
 
Example 12
Project: ditb   File: IntegrationTestImportTsv.java
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
 
Example 13
Project: aliyun-maxcompute-data-collectors   File: TestExport.java
/**
 * Create a data file that gets exported to the db.
 * @param fileNum the number of the file (for multi-file export)
 * @param numRecords how many records to write to the file.
 * @param gzip is true if the file should be gzipped.
 */
protected void createTextFile(int fileNum, int numRecords, boolean gzip,
    ColumnGenerator... extraCols) throws IOException {
  int startId = fileNum * numRecords;

  String ext = ".txt";
  if (gzip) {
    ext = ext + ".gz";
  }
  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part" + fileNum + ext);

  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);
  if (gzip) {
    CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
    CompressionCodec codec = ccf.getCodec(filePath);
    os = codec.createOutputStream(os);
  }
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < numRecords; i++) {
    w.write(getRecordLine(startId + i, extraCols));
  }
  w.close();
  os.close();

  if (gzip) {
    verifyCompressedFile(filePath, numRecords);
  }
}
 
Example 14
Project: ditb   File: TestCompoundBloomFilter.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();

  // This test requires the most recent HFile format (i.e. v2).
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);

  fs = FileSystem.get(conf);

  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
  assertNotNull(blockCache);
}
 
Example 15
Project: MXNetOnYARN   File: Client.java
/**
 * constructor
 * @throws IOException
 */
private Client() throws IOException {
    conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/core-site.xml"));
    conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/hdfs-site.xml"));
    dfs = FileSystem.get(conf);
    userName = UserGroupInformation.getCurrentUser().getShortUserName();
    credentials = UserGroupInformation.getCurrentUser().getCredentials();
}
 
Example 16
Project: hadoop   File: TestFileSystemCaching.java
@Test
public void testCacheDisabled() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
  FileSystem fs1 = FileSystem.get(new URI("uncachedfile://a"), conf);
  FileSystem fs2 = FileSystem.get(new URI("uncachedfile://a"), conf);
  assertNotSame(fs1, fs2);
}
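A side note on this example: FileSystem.get() normally caches instances per scheme, authority, and user, which is why the test must disable the cache to observe two distinct objects. When only a single private handle is needed, FileSystem.newInstance() bypasses the cache without any configuration change. A minimal sketch, reusing this test's conf:

  // Each call returns a fresh, privately owned instance, even with caching enabled.
  FileSystem fs1 = FileSystem.newInstance(new URI("uncachedfile://a"), conf);
  FileSystem fs2 = FileSystem.newInstance(new URI("uncachedfile://a"), conf);
  assertNotSame(fs1, fs2);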
 
Example 17
Project: ditb   File: TestHFileWriterV2.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
}
 
Example 18
Project: hadoop   File: TestNetworkedJob.java
@Test (timeout=500000)
public void testGetJobStatus() throws IOException, InterruptedException,
    ClassNotFoundException {
  MiniMRClientCluster mr = null;
  FileSystem fileSys = null;

  try {
    mr = createMiniClusterWithCapacityScheduler();

    JobConf job = new JobConf(mr.getConfig());

    fileSys = FileSystem.get(job);
    fileSys.delete(testDir, true);
    FSDataOutputStream out = fileSys.create(inFile, true);
    out.writeBytes("This is a test file");
    out.close();

    FileInputFormat.setInputPaths(job, inFile);
    FileOutputFormat.setOutputPath(job, outDir);

    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);

    job.setMapperClass(IdentityMapper.class);
    job.setReducerClass(IdentityReducer.class);
    job.setNumReduceTasks(0);

    JobClient client = new JobClient(mr.getConfig());
    RunningJob rj = client.submitJob(job);
    JobID jobId = rj.getID();

    // The following asserts read JobStatus twice and ensure the returned
    // JobStatus objects correspond to the same Job.
    assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId)
        .getJobStatus().getJobID());
    assertEquals("Expected matching startTimes", rj.getJobStatus()
        .getStartTime(), client.getJob(jobId).getJobStatus()
        .getStartTime());
  } finally {
    if (fileSys != null) {
      fileSys.delete(testDir, true);
    }
    if (mr != null) {
      mr.stop();
    }
  }
}
 
Example 19
Project: ditb   File: TestHFileLinkCleaner.java
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableLinkName, hriLink.getEncodedName(), familyName);

  // Create an hfile in the archive: <archive>/<table>/<region>/<cf>/<hfileName>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create link to hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // Link backref cannot be removed
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Link backref can be removed
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
      FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // HFile can be removed
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
 
Example 20
Project: hadoop   File: TestSpecialCharactersInOutputPath.java
public static boolean launchJob(URI fileSys,
                                JobConf conf,
                                int numMaps,
                                int numReduces) throws IOException {
  
  final Path inDir = new Path("/testing/input");
  final Path outDir = new Path("/testing/output");
  FileSystem fs = FileSystem.get(fileSys, conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    LOG.warn("Can't create " + inDir);
    return false;
  }
  // generate an input file
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("foo foo2 foo3");
  file.close();

  // use WordCount example
  FileSystem.setDefaultUri(conf, fileSys);
  conf.setJobName("foo");

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(SpecialTextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(IdentityMapper.class);        
  conf.setReducerClass(IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
    
  // run job and wait for completion
  RunningJob runningJob = JobClient.runJob(conf);
    
  try {
    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
  } catch (NullPointerException npe) {
    // This NPE should no longer happen.
    fail("An NPE should not have happened.");
  }
        
  // return job result
  LOG.info("job is complete: " + runningJob.isSuccessful());
  return (runningJob.isSuccessful());
}