Java Code Examples for org.apache.hadoop.fs.FileSystem#getDefaultUri()

The following examples show how to use org.apache.hadoop.fs.FileSystem#getDefaultUri(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
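For orientation, here is a minimal, hypothetical sketch (the class name DefaultUriDemo is invented) of what getDefaultUri returns, together with its setDefaultUri counterpart, which Example 5 below round-trips:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultUriDemo {
  public static void main(String[] args) {
    // new Configuration() loads core-site.xml (if present) from the
    // classpath; fs.defaultFS falls back to file:/// when unset.
    Configuration conf = new Configuration();

    // getDefaultUri returns the URI configured as the default
    // FileSystem, e.g. hdfs://namenode:8020.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    System.out.println("scheme    = " + defaultUri.getScheme());
    System.out.println("authority = " + defaultUri.getAuthority());

    // setDefaultUri is the symmetric write operation (see Example 5).
    FileSystem.setDefaultUri(conf, defaultUri);
  }
}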
Example 1
Source File: NameNode.java    From RDFS with Apache License 2.0
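Builds the FileSystem used for trash checkpointing: if a separate service RPC address is configured, the default URI is rebuilt with that host and port; otherwise the plain default FileSystem is returned.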
private static FileSystem getTrashFileSystem(Configuration conf) throws IOException {
  conf = new Configuration(conf);
  conf.set("fs.shell.delete.classname",
      "org.apache.hadoop.fs.TrashPolicyDefault.deleteCheckpoint");
  InetSocketAddress serviceAddress = NameNode.getDNProtocolAddress(conf);
  if (serviceAddress != null) {
    URI defaultUri = FileSystem.getDefaultUri(conf);
    URI serviceUri = null;
    try {
      serviceUri = new URI(defaultUri.getScheme(), defaultUri.getUserInfo(),
          serviceAddress.getHostName(), serviceAddress.getPort(),
          defaultUri.getPath(), defaultUri.getQuery(),
          defaultUri.getFragment());
    } catch (URISyntaxException uex) {
      throw new IOException("Failed to initialize a uri for trash FS");
    }
    Path trashFsPath = new Path(serviceUri.toString());
    return trashFsPath.getFileSystem(conf);
  } else {
    return FileSystem.get(conf);
  }
}
 
Example 2
Source File: HdfsResourceLoader.java    From ambiverse-nlu with Apache License 2.0
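Falls back to the default URI when no explicit URI is supplied, then opens the FileSystem, optionally impersonating a specific user.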
public HdfsResourceLoader(Configuration config, URI uri, String user) {
    this.pathMatcher = new AntPathMatcher();
    this.internalFS = true;
    FileSystem tempFS = null;

    try {
        if (uri == null) {
            uri = FileSystem.getDefaultUri(config);
        }

        tempFS = user != null ? FileSystem.get(uri, config, user) : FileSystem.get(uri, config);
    } catch (Exception ex) {
        tempFS = null;
        throw new IllegalStateException("Cannot create filesystem", ex);
    } finally {
        this.fs = tempFS;
    }
}
 
Example 3
Source File: AbstractHadoopProcessor.java    From localization_nifi with Apache License 2.0
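Extracts the authority and port from the default URI and attempts a one-second socket connection to the NameNode, so an unreachable cluster is detected early; URIs without a resolvable authority or port (e.g. HA nameservices) are skipped.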
protected void checkHdfsUriForTimeout(Configuration config) throws IOException {
    URI hdfsUri = FileSystem.getDefaultUri(config);
    String address = hdfsUri.getAuthority();
    int port = hdfsUri.getPort();
    if (address == null || address.isEmpty() || port < 0) {
        return;
    }
    InetSocketAddress namenode = NetUtils.createSocketAddr(address, port);
    SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(config);
    Socket socket = null;
    try {
        socket = socketFactory.createSocket();
        NetUtils.connect(socket, namenode, 1000); // 1 second timeout
    } finally {
        IOUtils.closeQuietly(socket);
    }
}
 
Example 4
Source File: TestHarFileSystemWithHA.java    From big-c with Apache License 2.0
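Takes the failover (HA) default URI and embeds its port-less authority in a har:// path, verifying that HarFileSystem handles HA URIs without a port.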
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: TestDistributedFileSystem.java    From big-c with Apache License 2.0
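Records the cluster's default URI, closes all cached FileSystem instances, then sets the URI on a fresh Configuration and fetches the FileSystem again.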
@Test
public void testFileSystemCloseAll() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  URI address = FileSystem.getDefaultUri(conf);

  try {
    FileSystem.closeAll();

    conf = getTestConfiguration();
    FileSystem.setDefaultUri(conf, address);
    FileSystem.get(conf);
    FileSystem.get(conf);
    FileSystem.closeAll();
  }
  finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 6
Source File: AbstractHdfsConnector.java    From pulsar with Apache License 2.0
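The same one-second connectivity probe as Example 3, here in Pulsar's HDFS connector.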
protected void checkHdfsUriForTimeout(Configuration config) throws IOException {
    URI hdfsUri = FileSystem.getDefaultUri(config);
    String address = hdfsUri.getAuthority();
    int port = hdfsUri.getPort();
    if (address == null || address.isEmpty() || port < 0) {
        return;
    }
    InetSocketAddress namenode = NetUtils.createSocketAddr(address, port);
    SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(config);
    Socket socket = null;
    try {
        socket = socketFactory.createSocket();
        NetUtils.connect(socket, namenode, 1000); // 1 second timeout
    } finally {
        IOUtils.closeQuietly(socket);
    }
}
 
Example 7
Source File: NativeAzureFileSystem.java    From hadoop with Apache License 2.0
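When a WASB URI has no authority and WASB is the default FileSystem, the authority from the default URI is copied into the reconstructed URI.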
/**
 * Puts in the authority of the default file system if it is a WASB file
 * system and the given URI's authority is null.
 * 
 * @return The URI with reconstructed authority if necessary and possible.
 */
private static URI reconstructAuthorityIfNeeded(URI uri, Configuration conf) {
  if (null == uri.getAuthority()) {
    // If WASB is the default file system, get the authority from there
    URI defaultUri = FileSystem.getDefaultUri(conf);
    if (defaultUri != null && isWasbScheme(defaultUri.getScheme())) {
      try {
        // Reconstruct the URI with the authority from the default URI.
        return new URI(uri.getScheme(), defaultUri.getAuthority(),
            uri.getPath(), uri.getQuery(), uri.getFragment());
      } catch (URISyntaxException e) {
        // This should never happen.
        throw new Error("Bad URI construction", e);
      }
    }
  }
  return uri;
}
 
Example 8
Source File: FileSystemKey.java    From incubator-gobblin with Apache License 2.0
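Normalizes a possibly incomplete URI into a canonical cache key, substituting the default URI when scheme and authority are absent, or when only the scheme is present and matches the default FileSystem.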
private URI resolveURI(URI uri, Configuration configuration) {
  String scheme = uri.getScheme();
  String authority = uri.getAuthority();

  if (scheme == null && authority == null) {     // use default FS
    return FileSystem.getDefaultUri(configuration);
  }

  if (scheme != null && authority == null) {     // no authority
    URI defaultUri = FileSystem.getDefaultUri(configuration);
    if (scheme.equals(defaultUri.getScheme())    // if scheme matches default
        && defaultUri.getAuthority() != null) {  // & default has authority
      return defaultUri;                         // return default
    }
  }

  try {
    return new URI(scheme, Strings.nullToEmpty(authority), "/", null, null);
  } catch (URISyntaxException use) {
    // This should never happen
    throw new RuntimeException(use);
  }
}
 
Example 9
Source File: TestEncryptionZones.java    From big-c with Apache License 2.0
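Constructs an HdfsAdmin from the default URI, creates an encryption zone, rolls the zone key, and checks that existing files stay readable while new files receive a different EDEK and key version.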
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
 
Example 10
Source File: SecondaryNameNode.java    From RDFS with Apache License 2.0
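Checks that the default FileSystem is HDFS before resolving the NameNode's HTTP info server address.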
/**
 * Returns the address of the Jetty (HTTP) server that the NameNode is listening on.
 */
private String getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!"hdfs".equals(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  return NetUtils.getServerAddress(conf, "dfs.info.bindAddress", 
                                   "dfs.info.port", "dfs.http.address");
}
 
Example 11
Source File: Logalyzer.java    From hadoop with Apache License 2.0
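Prefixes the archive directory with the default FileSystem URI to form the DistCp destination URL.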
/**
 * doArchive: workhorse function to archive log files.
 * @param logListURI The URI that serves the list of log files to archive.
 * @param archiveDirectory The directory in which to store the archived log files.
 * @throws IOException
 */
public void doArchive(String logListURI, String archiveDirectory)
    throws IOException {
  String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
  DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
}
 
Example 12
Source File: TestHdfsAdmin.java    From hadoop with Apache License 2.0
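Constructs an HdfsAdmin from the default URI and verifies that namespace and space quotas can be set and cleared independently of each other.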
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 13
Source File: NameNode.java    From RDFS with Apache License 2.0
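Parses the NameNode RPC address from the authority of the default FileSystem URI; a missing authority is reported as a configuration error.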
public static InetSocketAddress getAddress(Configuration conf) {
  URI uri = FileSystem.getDefaultUri(conf);
  String authority = uri.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, uri.toString()));
  }
  return getAddress(authority);
}
 
Example 14
Source File: ProxiedFileSystemCache.java    From incubator-gobblin with Apache License 2.0
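Resolves a FileSystem URI by priority: the explicit URI, then the URI of an existing FileSystem, then the configuration's default URI.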
private static URI resolveUri(URI uri, Configuration configuration, FileSystem fileSystem) throws IOException {
  if (uri != null) {
    return uri;
  }
  if (fileSystem != null) {
    return fileSystem.getUri();
  }
  if (configuration != null) {
    return FileSystem.getDefaultUri(configuration);
  }
  throw new IOException("FileSystem URI could not be determined from available inputs.");
}
 
Example 15
Source File: SecondaryNameNode.java    From hadoop with Apache License 2.0
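The scheme-aware variant of Example 10: it resolves an http or https info server URL from the host of the default URI.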
/**
 * Returns the URL of the Jetty (HTTP) server that the NameNode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
 
Example 16
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
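Starts a MiniDFSCluster, takes the destination NameNode URI from the default URI, and verifies that a DistCpV1 copy over HFTP fails (exit code -3) when the source directory has permission 000.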
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs =
        USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return FileSystem.get(nnURI, userConf);
          }
        });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 17
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
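Uses the default URI of a MiniDFSCluster to drive DistCpV1 -delete -update runs and confirms that destination files removed by -delete end up in the trash.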
/** test -delete */
public void testDelete() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt("fs.trash.interval", 60);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final URI nnURI = FileSystem.getDefaultUri(conf);
    final String nnUri = nnURI.toString();
    final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

    final DistCpV1 distcp = new DistCpV1(conf);
    final FsShell shell = new FsShell(conf);  

    final String srcrootdir = "/src_root";
    final String dstrootdir = "/dst_root";

    {
      //create source files
      createFiles(nnURI, srcrootdir);
      String srcresults = execCmd(shell, "-lsr", srcrootdir);
      srcresults = removePrefix(srcresults, srcrootdir);
      System.out.println("srcresults=" +  srcresults);

      //create some files in dst
      createFiles(nnURI, dstrootdir);
      System.out.println("dstrootdir=" +  dstrootdir);
      shell.run(new String[]{"-lsr", dstrootdir});

      //run distcp
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log",
                       nnUri+srcrootdir, nnUri+dstrootdir});

      //make sure src and dst contains the same files
      String dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("first dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);

      //create additional file in dst
      create(fs, new Path(dstrootdir, "foo"));
      create(fs, new Path(dstrootdir, "foobar"));

      //run distcp again
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log2",
                       nnUri+srcrootdir, nnUri+dstrootdir});
      
      //make sure src and dst contains the same files
      dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("second dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);
      // verify that files removed in -delete were moved to the trash
      // regrettably, this test will break if Trash changes incompatibly
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foo")));
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foobar")));

      //cleanup
      deldir(fs, dstrootdir);
      deldir(fs, srcrootdir);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 18
Source File: TestEncryptionZones.java    From big-c with Apache License 2.0
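Repeats the encryption zone read/write and append checks over WebHDFS; the HdfsAdmin handle is again built from the default URI.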
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsFileSystem.SCHEME);

  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  /* Create an unencrypted file for comparison purposes. */
  final Path unencFile = new Path("/unenc");
  final int len = 8192;
  DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);

  /*
   * Create the same file via webhdfs, but this time encrypted. Compare it
   * using both webhdfs and DFS.
   */
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);

  /* Verify appending to files works correctly. */
  appendOneByte(fs, unencFile);
  appendOneByte(webHdfsFs, encFile1);
  appendOneByte(fs, encFile2);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);
}
 
Example 19
Source File: NameNode.java    From hadoop with Apache License 2.0
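The Hadoop trunk variant of Example 13: the NameNode address is derived directly from the default FileSystem URI.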
public static InetSocketAddress getAddress(Configuration conf) {
  URI filesystemURI = FileSystem.getDefaultUri(conf);
  return getAddress(filesystemURI);
}
 
Example 20
Source File: FileSystemPlugin.java    From Bats with Apache License 2.0
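Treats the connection as S3 when the scheme of the default URI is s3a.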
private boolean isS3Connection(Configuration conf) {
  URI uri = FileSystem.getDefaultUri(conf);
  return uri.getScheme().equals("s3a");
}