Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getURI()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getURI(). Each example comes from an open-source project; the source file and project are noted above the code.
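Before the project examples, here is a minimal self-contained sketch of the typical pattern. It is illustrative only: the class name, directory path, and configuration are assumptions, not code taken from any of the projects below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsGetUriSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start an in-process HDFS cluster with a single DataNode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      // getURI() returns the hdfs:// URI of the cluster's (first) NameNode.
      URI uri = cluster.getURI();
      FileSystem fs = FileSystem.get(uri, conf);
      try {
        // Qualify a path against the cluster URI, as several of the examples below do.
        Path dir = new Path(new Path(uri), "/example/data");
        fs.mkdirs(dir);
        System.out.println("Created " + dir + " on " + uri);
      } finally {
        fs.close();
      }
    } finally {
      cluster.shutdown();
    }
  }
}

With no argument, getURI() returns the URI of the first NameNode; Example 5 below uses the indexed getURI(int) variant to handle clusters with more than one NameNode.
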
Example 1
Source File: TestFSRMStateStore.java    From hadoop with Apache License 2.0, 5 votes
public TestFSRMStateStoreTester(MiniDFSCluster cluster, boolean adminCheckEnable) throws Exception {
  Path workingDirPath = new Path("/yarn/Test");
  this.adminCheckEnable = adminCheckEnable;
  this.cluster = cluster;
  FileSystem fs = cluster.getFileSystem();
  fs.mkdirs(workingDirPath);
  // Qualify the working directory against the cluster's hdfs:// URI.
  Path clusterURI = new Path(cluster.getURI());
  workingDirPathURI = new Path(clusterURI, workingDirPath);
  fs.close();
}
 
Example 2
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0, 5 votes
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  // hdfs:// URI of the cluster's NameNode, passed to checkFileContent below.
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer buf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestFSRMStateStore.java    From big-c with Apache License 2.0, 5 votes
public TestFSRMStateStoreTester(MiniDFSCluster cluster, boolean adminCheckEnable) throws Exception {
  Path workingDirPath = new Path("/yarn/Test");
  this.adminCheckEnable = adminCheckEnable;
  this.cluster = cluster;
  FileSystem fs = cluster.getFileSystem();
  fs.mkdirs(workingDirPath);
  // Qualify the working directory against the cluster's hdfs:// URI.
  Path clusterURI = new Path(cluster.getURI());
  workingDirPathURI = new Path(clusterURI, workingDirPath);
  fs.close();
}
 
Example 4
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0, 5 votes
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  // hdfs:// URI of the cluster's NameNode, passed to checkFileContent below.
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer buf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 5
Source File: HdfsTestUtil.java    From lucene-solr with Apache License 2.0, 5 votes
public static String getURI(MiniDFSCluster dfsCluster) {
  if (dfsCluster.getNameNodeInfos().length > 1) {
    String logicalName = String.format(Locale.ENGLISH, LOGICAL_HOSTNAME, dfsCluster.getInstanceId()); // NOTE: hdfs uses default locale
    return "hdfs://" + logicalName;
  } else {
    URI uri = dfsCluster.getURI(0);
    return uri.toString();
  }
}
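The branch above distinguishes a cluster with more than one NameNode, where a logical HA nameservice URI has to be used, from a single-NameNode cluster, where the concrete URI from getURI(0) is enough. A hypothetical caller might look like the sketch below; conf is assumed to already carry the cluster's client settings (including the HA failover configuration when several NameNodes exist), and the directory name is illustrative.

  // Hypothetical usage of the helper above; conf and dfsCluster are assumed to exist.
  String hdfsUri = HdfsTestUtil.getURI(dfsCluster);
  FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf);
  try {
    fs.mkdirs(new Path(hdfsUri + "/solr"));   // illustrative directory name
  } finally {
    fs.close();
  }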
 
Example 6
Source File: HdfsTestUtil.java    From incubator-sentry with Apache License 2.0, 5 votes
public static String getDataDir(MiniDFSCluster dfsCluster, String dataDir)
    throws IOException {
  URI uri = dfsCluster.getURI();
  String dir = uri.toString()
      + "/"
      + new File(dataDir).toString().replaceAll(":", "_")
          .replaceAll("/", "_");
  return dir;
}
 
Example 7
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0, 4 votes
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    // hdfs:// URI of the cluster's NameNode, used for both read checks below.
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 8
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0, 4 votes
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    // hdfs:// URI of the cluster's NameNode, used for both read checks below.
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}