Java Code Examples for org.apache.hadoop.fs.FileSystem.getDefaultUri()

The following are Java code examples showing how to use the getDefaultUri() method of the org.apache.hadoop.fs.FileSystem class.
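
For orientation: FileSystem.getDefaultUri(conf) resolves the URI of the default file system from the fs.defaultFS property, falling back to file:/// when the property is unset, and the companion FileSystem.setDefaultUri(conf, uri) overrides it in that Configuration. Below is a minimal sketch; the hdfs://namenode.example.com:8020 address is a placeholder, not taken from the examples that follow.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultUriDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Resolves fs.defaultFS; returns file:/// when the property is unset.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    System.out.println("Default file system: " + defaultUri);

    // The setter rewrites fs.defaultFS in this Configuration only.
    // The address below is a hypothetical placeholder.
    FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));
    System.out.println("Overridden default: " + FileSystem.getDefaultUri(conf));
  }
}
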
Example 1
Project: hadoop   File: TestDistributedFileSystem.java
@Test
public void testFileSystemCloseAll() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  URI address = FileSystem.getDefaultUri(conf);

  try {
    FileSystem.closeAll();

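    // Re-create the configuration, point it at the cluster's default URI, and
    // verify that cached FileSystem instances can be re-fetched and closed.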
    conf = getTestConfiguration();
    FileSystem.setDefaultUri(conf, address);
    FileSystem.get(conf);
    FileSystem.get(conf);
    FileSystem.closeAll();
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 2
Project: hadoop   File: TestHarFileSystemWithHA.java
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    
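    // The default URI now carries the HA logical authority (no port);
    // embed that authority in a har://hdfs-<authority> path.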
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    // Guard against an NPE when the cluster failed to start.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 3
Project: hadoop   File: NativeAzureFileSystem.java
/**
 * Puts in the authority of the default file system if it is a WASB file
 * system and the given URI's authority is null.
 * 
 * @return The URI with reconstructed authority if necessary and possible.
 */
private static URI reconstructAuthorityIfNeeded(URI uri, Configuration conf) {
  if (null == uri.getAuthority()) {
    // If WASB is the default file system, get the authority from there
    URI defaultUri = FileSystem.getDefaultUri(conf);
    if (defaultUri != null && isWasbScheme(defaultUri.getScheme())) {
      try {
        // Reconstruct the URI with the authority from the default URI.
        return new URI(uri.getScheme(), defaultUri.getAuthority(),
            uri.getPath(), uri.getQuery(), uri.getFragment());
      } catch (URISyntaxException e) {
        // This should never happen.
        throw new Error("Bad URI construction", e);
      }
    }
  }
  return uri;
}
 
Example 4
Project: hadoop   File: SecondaryNameNode.java
/**
 * Returns the URL of the HTTP info server that the NameNode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
 
Example 5
Project: hadoop   File: TestEncryptionZones.java
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
 
Example 6
Project: hadoop   File: TestHdfsAdmin.java
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
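    // getQuota()/getSpaceQuota() return -1 while no quota is set.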
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 7
Project: hadoop   File: Logalyzer.java
/**
 * doArchive: Workhorse function to archive log-files.
 * @param logListURI The URI that serves the list of log-files to archive.
 * @param archiveDirectory The directory in which to store the archived log-files.
 * @throws IOException
 */
public void doArchive(String logListURI, String archiveDirectory)
    throws IOException {
  String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
  DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
}
 
Example 8
Project: hadoop-oss   File: NuCypherExtUtilClient.java
public static InetSocketAddress getNNAddress(Configuration conf) {
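  // Derive the NameNode RPC address from the default file system URI,
  // resolving HA logical URIs where necessary.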
  URI filesystemURI = FileSystem.getDefaultUri(conf);
  return getNNAddressCheckLogical(conf, filesystemURI);
}
 
Example 9
Project: hadoop   File: NameNode.java
public static InetSocketAddress getAddress(Configuration conf) {
  URI filesystemURI = FileSystem.getDefaultUri(conf);
  return getAddress(filesystemURI);
}
 
Example 10
Project: hadoop   File: TestEncryptionZones.java
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsFileSystem.SCHEME);

  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  /* Create an unencrypted file for comparison purposes. */
  final Path unencFile = new Path("/unenc");
  final int len = 8192;
  DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);

  /*
   * Create the same file via webhdfs, but this time encrypted. Compare it
   * using both webhdfs and DFS.
   */
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);

  /* Verify appending to files works correctly. */
  appendOneByte(fs, unencFile);
  appendOneByte(webHdfsFs, encFile1);
  appendOneByte(fs, encFile2);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);
}
 
Example 11
Project: hadoop   File: TestCopyFiles.java
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs = 
      USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, userConf);
      }
    });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 12
Project: hadoop   File: TestCopyFiles.java
/** test -delete */
public void testDelete() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt("fs.trash.interval", 60);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final URI nnURI = FileSystem.getDefaultUri(conf);
    final String nnUri = nnURI.toString();
    final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

    final DistCpV1 distcp = new DistCpV1(conf);
    final FsShell shell = new FsShell(conf);  

    final String srcrootdir = "/src_root";
    final String dstrootdir = "/dst_root";

    {
      //create source files
      createFiles(nnURI, srcrootdir);
      String srcresults = execCmd(shell, "-lsr", srcrootdir);
      srcresults = removePrefix(srcresults, srcrootdir);
      System.out.println("srcresults=" +  srcresults);

      //create some files in dst
      createFiles(nnURI, dstrootdir);
      System.out.println("dstrootdir=" +  dstrootdir);
      shell.run(new String[]{"-lsr", dstrootdir});

      //run distcp
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log",
                       nnUri+srcrootdir, nnUri+dstrootdir});

      //make sure src and dst contains the same files
      String dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("first dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);

      //create additional file in dst
      create(fs, new Path(dstrootdir, "foo"));
      create(fs, new Path(dstrootdir, "foobar"));

      //run distcp again
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log2",
                       nnUri+srcrootdir, nnUri+dstrootdir});
      
      //make sure src and dst contains the same files
      dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("second dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);
      // verify that files removed in -delete were moved to the trash
      // regrettably, this test will break if Trash changes incompatibly
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foo")));
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foobar")));

      //cleanup
      deldir(fs, dstrootdir);
      deldir(fs, srcrootdir);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}