Java Code Examples for org.apache.hadoop.fs.FileSystem.newInstance()

The following are Java code examples showing how to use the newInstance() method of the org.apache.hadoop.fs.FileSystem class. You can vote up the examples you like; your votes will be used in our system to surface more good examples.
Example 1
Project: hadoop-oss   File: TestFileSystemCaching.java   Source Code and License Vote up 7 votes
@Test
public void testFsUniqueness() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());

  // FileSystem.get serves cached instances: repeated calls hand back the same object.
  FileSystem cachedA = FileSystem.get(conf);
  FileSystem cachedB = FileSystem.get(conf);
  assertTrue(cachedA == cachedB);

  // FileSystem.newInstance bypasses the cache: each call yields a distinct object.
  FileSystem fresh1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  FileSystem fresh2 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  assertTrue(fresh1 != fresh2 && !fresh1.equals(fresh2));
  fresh1.close();
  fresh2.close();
}
 
Example 2
Project: hadoop   File: TestDisableConnCache.java   Source Code and License Vote up 6 votes
/**
 * Test that the socket cache can be disabled by setting the capacity to
 * 0. Regression test for HDFS-3365.
 * @throws Exception 
 */
@Test
public void testDisableCache() throws Exception {
  HdfsConfiguration confWithoutCache = new HdfsConfiguration();
  // Configure a new instance with no peer caching, ensure that it doesn't
  // cache anything
  confWithoutCache.setInt(
      DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
  // Single-datanode mini cluster driven through the block-reader test utility.
  BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
  final Path testFile = new Path("/testConnCache.dat");
  util.writeFile(testFile, FILE_SIZE / 1024);
  // newInstance bypasses the FileSystem cache, so this client is guaranteed
  // to pick up the zero-capacity configuration set above.
  FileSystem fsWithoutCache = FileSystem.newInstance(util.getConf());
  try {
    DFSTestUtil.readFile(fsWithoutCache, testFile);
    // After a full read, the client's peer cache must still be empty.
    assertEquals(0, ((DistributedFileSystem)fsWithoutCache).
        dfs.getClientContext().getPeerCache().size());
  } finally {
    // Release the uncached client and tear down the mini cluster.
    fsWithoutCache.close();
    util.shutdown();
  }
}
 
Example 3
Project: hadoop   File: TestFileSystemCaching.java   Source Code and License Vote up 6 votes
@Test
public void testFsUniqueness() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());

  // FileSystem.get serves cached instances: repeated calls hand back the same object.
  FileSystem cachedA = FileSystem.get(conf);
  FileSystem cachedB = FileSystem.get(conf);
  assertTrue(cachedA == cachedB);

  // FileSystem.newInstance bypasses the cache: each call yields a distinct object.
  FileSystem fresh1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  FileSystem fresh2 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  assertTrue(fresh1 != fresh2 && !fresh1.equals(fresh2));
  fresh1.close();
  fresh2.close();
}
 
Example 4
Project: kafka-connect-fs   File: AbstractPolicy.java   Source Code and License Vote up 5 votes
/**
 * Creates one FileSystem per configured URI, seeded with the policy-scoped
 * overrides found in {@code customConfigs}, and registers it in
 * {@code this.fileSystems}.
 */
private void configFs(Map<String, Object> customConfigs) throws IOException {
    for (String uri : this.conf.getFsUris()) {
        Configuration hadoopConf = new Configuration();
        // Copy every custom setting that carries the policy FS prefix,
        // stripping the prefix before handing it to Hadoop.
        for (Map.Entry<String, Object> entry : customConfigs.entrySet()) {
            String key = entry.getKey();
            if (key.startsWith(FsSourceTaskConfig.POLICY_PREFIX_FS)) {
                hadoopConf.set(key.replace(FsSourceTaskConfig.POLICY_PREFIX_FS, ""),
                        (String) entry.getValue());
            }
        }
        Path workingDir = new Path(convert(uri));
        // Uncached instance: this policy owns the FileSystem's lifecycle.
        FileSystem fs = FileSystem.newInstance(workingDir.toUri(), hadoopConf);
        fs.setWorkingDirectory(workingDir);
        this.fileSystems.add(fs);
    }
}
 
Example 5
Project: kafka-connect-fs   File: HdfsPolicyTestBase.java   Source Code and License Vote up 5 votes
@BeforeClass
public static void initFs() throws IOException {
    // Back the mini DFS cluster with a throwaway temp directory.
    hdfsDir = Files.createTempDirectory("test-");
    clusterConfig = new Configuration();
    clusterConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.toAbsolutePath().toString());
    cluster = new MiniDFSCluster.Builder(clusterConfig).build();

    // Talk to the cluster through an uncached FileSystem instance; the URI
    // alone (host + port) is enough for a default client Configuration.
    fsUri = URI.create(String.format("hdfs://localhost:%d/", cluster.getNameNodePort()));
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Example 6
Project: kafka-connect-fs   File: HdfsFileReaderTestBase.java   Source Code and License Vote up 5 votes
@BeforeClass
public static void initFs() throws IOException {
    // Back the mini DFS cluster with a throwaway temp directory.
    hdfsDir = Files.createTempDirectory("test-");
    clusterConfig = new Configuration();
    clusterConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.toAbsolutePath().toString());
    cluster = new MiniDFSCluster.Builder(clusterConfig).build();

    // Talk to the cluster through an uncached FileSystem instance; the URI
    // alone (host + port) is enough for a default client Configuration.
    fsUri = URI.create(String.format("hdfs://localhost:%d/", cluster.getNameNodePort()));
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Example 7
Project: kafka-connect-fs   File: HdfsFsSourceTaskTestBase.java   Source Code and License Vote up 5 votes
@BeforeClass
public static void initFs() throws IOException {
    // Back the mini DFS cluster with a throwaway temp directory.
    hdfsDir = Files.createTempDirectory("test-");
    clusterConfig = new Configuration();
    clusterConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.toAbsolutePath().toString());
    cluster = new MiniDFSCluster.Builder(clusterConfig).build();

    // Talk to the cluster through an uncached FileSystem instance that
    // reuses the cluster's own configuration.
    fsUri = URI.create(String.format("hdfs://localhost:%d/", cluster.getNameNodePort()));
    fs = FileSystem.newInstance(fsUri, clusterConfig);
}
 
Example 8
Project: hadoop   File: TestLease.java   Source Code and License Vote up 5 votes
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts.  This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test
public void testLeaseAfterRenameAndRecreate() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path path1 = new Path("/test-file");
    final String contents1 = "contents1";
    final Path path2 = new Path("/test-file-new-location");
    final String contents2 = "contents2";

    // open a file to get a lease
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out1 = fs.create(path1);
    out1.writeBytes(contents1);
    Assert.assertTrue(hasLease(cluster, path1));
    Assert.assertEquals(1, leaseCount(cluster));

    // A second, uncached FileSystem instance acts as a distinct client.
    DistributedFileSystem fs2 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    fs2.rename(path1, path2);

    // Recreate a file at the old location while the renamed one is still open.
    FSDataOutputStream out2 = fs2.create(path1);
    out2.writeBytes(contents2);
    out2.close();

    // The first file should still be open and valid
    Assert.assertTrue(hasLease(cluster, path2));
    out1.close();

    // Contents should be as expected
    DistributedFileSystem fs3 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
    Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));

    // FIX: newInstance() returns uncached instances the caller owns; close
    // them so client threads and sockets don't leak across tests.
    fs3.close();
    fs2.close();
  } finally {
    cluster.shutdown();
  }
}
 
Example 9
Project: kafka-connect-fs   File: LocalPolicyTestBase.java   Source Code and License Vote up 4 votes
@BeforeClass
public static void initFs() throws IOException {
    // Use a scratch directory on the local filesystem as the FS root.
    localDir = Files.createTempDirectory("test-");
    fsUri = localDir.toUri();
    // newInstance avoids sharing a cached FileSystem between test classes.
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Example 10
Project: kafka-connect-fs   File: LocalFileReaderTestBase.java   Source Code and License Vote up 4 votes
@BeforeClass
public static void initFs() throws IOException {
    // Use a scratch directory on the local filesystem as the FS root.
    localDir = Files.createTempDirectory("test-");
    fsUri = localDir.toUri();
    // newInstance avoids sharing a cached FileSystem between test classes.
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Example 11
Project: kafka-connect-fs   File: LocalFsSourceTaskTestBase.java   Source Code and License Vote up 4 votes
@BeforeClass
public static void initFs() throws IOException {
    // Use a scratch directory on the local filesystem as the FS root.
    localDir = Files.createTempDirectory("test-");
    fsUri = localDir.toUri();
    // newInstance avoids sharing a cached FileSystem between test classes.
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Example 12
Project: kafka-connect-hdfs   File: HdfsStorage.java   Source Code and License Vote up 4 votes
/**
 * Creates storage backed by HDFS at the given URL.
 * Uses an uncached FileSystem instance, so this object owns its lifecycle.
 */
public HdfsStorage(Configuration conf, String url) throws IOException {
  this.conf = conf;
  this.url = url;
  fs = FileSystem.newInstance(URI.create(url), conf);
}
 
Example 13
Project: hadoop   File: TestLeaseRecovery.java   Source Code and License Vote up 4 votes
/**
 * Block Recovery when the meta file not having crcs for all chunks in block
 * file
 */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Path file = new Path("/testRecoveryFile");
  DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = dfs.create(file);
  // Write ~2 MB of data ("Data" is 4 bytes per iteration).
  int count = 0;
  while (count < 2 * 1024 * 1024) {
    out.writeBytes("Data");
    count += 4;
  }
  out.hsync();
  // abort the original stream, leaving the file open with an active lease
  ((DFSOutputStream) out.getWrappedStream()).abort();

  LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
      file.toString(), 0, count);
  ExtendedBlock block = locations.get(0).getBlock();
  DataNode dn = cluster.getDataNodes().get(0);
  BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
  File metafile = new File(localPathInfo.getMetaPath());
  assertTrue(metafile.exists());

  // reduce the block meta file size so it no longer covers every chunk.
  // FIX: close the RandomAccessFile even if setLength throws.
  RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
  try {
    raf.setLength(metafile.length() - 20);
  } finally {
    raf.close();
  }

  // restart DN to make replica to RWR
  DataNodeProperties dnProp = cluster.stopDataNode(0);
  cluster.restartDataNode(dnProp, true);

  // try to recover the lease with a fresh, uncached client
  DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
      .newInstance(cluster.getConfiguration(0));
  try {
    count = 0;
    // recoverLease is asynchronous; poll up to ~9 seconds for completion.
    while (++count < 10 && !newdfs.recoverLease(file)) {
      Thread.sleep(1000);
    }
    assertTrue("File should be closed", newdfs.recoverLease(file));
  } finally {
    // FIX: newInstance() returns an uncached instance the test owns; close
    // it so client threads and sockets don't leak.
    newdfs.close();
  }
}
 
Example 14
Project: hadoop   File: MiniDFSCluster.java   Source Code and License Vote up 2 votes
/**
 * Returns a brand-new FileSystem for the given namenode, distinct from the
 * cached instance FileSystem.get(conf) would hand back. This simulates
 * different threads each working on their own FileSystem instance.
 */
public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
  Configuration nnConf = nameNodes[nnIndex].conf;
  return FileSystem.newInstance(getURI(nnIndex), nnConf);
}