Java Code Examples for org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream. They are extracted from open source projects; the source project and source file are noted above each example.
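Most of the examples below follow one of two patterns: calling EditLogFileOutputStream.setShouldSkipFsyncForTesting(true) so that a MiniDFSCluster-based test does not fsync the edit log on every transaction, and writing a valid edit-log header into a freshly created file. The minimal, self-contained sketch below combines both; the class name SkipFsyncExample and the temporary file are illustrative and do not come from any of the listed projects.

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

public class SkipFsyncExample {
  public static void main(String[] args) throws IOException {
    // Pattern 1: skip fsync on edit-log files so MiniDFSCluster tests run faster.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      // ... exercise cluster.getFileSystem() here ...
    } finally {
      cluster.shutdown();
    }

    // Pattern 2: write an edit-log header into an empty file (illustrative file).
    File inProgress = File.createTempFile("edits_inprogress", null);
    try (DataOutputStream out =
        new DataOutputStream(new FileOutputStream(inProgress))) {
      EditLogFileOutputStream.writeHeader(
          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    }
  }
}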
Example 1
Source Project: hadoop   Source File: TestHAStateTransitions.java    License: Apache License 2.0
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
 
Example 2
Source Project: big-c   Source File: TestHAStateTransitions.java    License: Apache License 2.0
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
 
Example 3
@Before
public void setUp() throws Exception {
  admin = UserGroupInformation.createUserForTesting(
      System.getProperty("user.name"), new String[] { "supergroup" });
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
      Configuration conf = new HdfsConfiguration();
      conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
      conf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
          MockSentryAuthorizationProvider.class.getName());
      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
      EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      miniDFS = new MiniDFSCluster.Builder(conf).build();
      return null;
    }
  });
}
 
Example 4
Source Project: hadoop   Source File: TestCachingStrategy.java    License: Apache License 2.0
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);
  
  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
 
Example 5
Source Project: big-c   Source File: TestCachingStrategy.java    License: Apache License 2.0
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);
  
  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
 
Example 6
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
  Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
    "ERROR");

  TraceUtil.initTracer(conf);

  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;
  String dataTestDir = getDataTestDir().toString();
  conf.set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

  return this.dfsCluster;
}
 
Example 7
Source Project: tez   Source File: TestSecureShuffle.java    License: Apache License 2.0
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}
 
Example 8
Source Project: tez   Source File: TestPipelinedShuffle.java    License: Apache License 2.0
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}
 
Example 9
Source Project: tez   Source File: TestAnalyzer.java    License: Apache License 2.0
@BeforeClass
public static void setupClass() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  dfsCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = dfsCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());

  setupTezCluster();
}
 
Example 10
Source Project: tez   Source File: TestHistoryParser.java    License: Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());

  setupTezCluster();
}
 
Example 11
Source Project: hadoop   Source File: BinaryEditsVisitor.java    License: Apache License 2.0
/**
 * Create a processor that writes to a given file
 * @param outputName Name of file to write output to
 */
public BinaryEditsVisitor(String outputName) throws IOException {
  this.elfos = new EditLogFileOutputStream(new Configuration(),
    new File(outputName), 0);
  elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
}
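A stream constructed this way is typically driven with the create/write/flush/close sequence that Example 12 shows end-to-end. The sketch below mirrors that loop in isolation; the class and method names (CopyEditsSketch, copyEdits) are made up for illustration, and the HdfsConstants import assumes the Hadoop 2.x package layout used by the surrounding examples.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

public class CopyEditsSketch {
  // Copy every edit op from an existing edits file src into a new edits file dst.
  static void copyEdits(File src, File dst) throws IOException {
    EditLogFileInputStream in = new EditLogFileInputStream(src,
        HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
    EditLogFileOutputStream out = new EditLogFileOutputStream(
        new Configuration(), dst, (int) src.length());
    try {
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); // writes the header
      FSEditLogOp op = in.readOp();
      while (op != null) {
        out.write(op);
        op = in.readOp();
      }
      out.setReadyToFlush();   // stage buffered edits for flushing
      out.flushAndSync(true);  // flush (and fsync, unless skipped for testing)
    } finally {
      out.close();
      in.close();
    }
  }
}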
 
Example 12
Source Project: hadoop   Source File: TestDFSUpgrade.java    License: Apache License 2.0
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
          false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int)tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();

  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
 
Example 13
Source Project: hadoop   Source File: MiniDFSCluster.java    License: Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    //make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example 14
Source Project: big-c   Source File: BinaryEditsVisitor.java    License: Apache License 2.0
/**
 * Create a processor that writes to a given file
 * @param outputName Name of file to write output to
 */
public BinaryEditsVisitor(String outputName) throws IOException {
  this.elfos = new EditLogFileOutputStream(new Configuration(),
    new File(outputName), 0);
  elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
}
 
Example 15
Source Project: big-c   Source File: TestDFSUpgrade.java    License: Apache License 2.0
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
          false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int)tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();

  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
 
Example 16
Source Project: big-c   Source File: MiniDFSCluster.java    License: Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    //make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example 17
Source Project: incubator-sentry   Source File: TestHDFSIntegration.java    License: Apache License 2.0
private static void startDFSandYARN() throws IOException,
    InterruptedException {
  adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
      hadoopConf = new HdfsConfiguration();
      hadoopConf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
          SentryAuthorizationProvider.class.getName());
      hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
      hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
      File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
      hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
      hadoopConf.set("hadoop.security.group.mapping",
          MiniDFS.PseudoGroupMappingService.class.getName());
      Configuration.addDefaultResource("test.xml");

      hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse,/tmp/external");
      hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
      hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms", String.valueOf(CACHE_REFRESH));

      hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms", String.valueOf(STALE_THRESHOLD));

      hadoopConf.set("sentry.hdfs.service.security.mode", "none");
      hadoopConf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
      hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
      EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
      Path tmpPath = new Path("/tmp");
      Path hivePath = new Path("/user/hive");
      Path warehousePath = new Path(hivePath, "warehouse");
      miniDFS.getFileSystem().mkdirs(warehousePath);
      boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
      LOGGER.info("\n\n Is dir :" + directory + "\n\n");
      LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
      fsURI = miniDFS.getFileSystem().getUri().toString();
      hadoopConf.set("fs.defaultFS", fsURI);

      // Create Yarn cluster
      // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

      miniDFS.getFileSystem().mkdirs(tmpPath);
      miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
      miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
      miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
      LOGGER.info("\n\n Owner :"
          + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner()
          + ", "
          + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup()
          + "\n\n");
      LOGGER.info("\n\n Owner tmp :"
          + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
          + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
          + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", "
          + "\n\n");

      int dfsSafeCheckRetry = 30;
      boolean hasStarted = false;
      for (int i = dfsSafeCheckRetry; i > 0; i--) {
        if (!miniDFS.getFileSystem().isInSafeMode()) {
          hasStarted = true;
          LOGGER.info("HDFS safemode check num times : " + (31 - i));
          break;
        }
      }
      if (!hasStarted) {
        throw new RuntimeException("HDFS hasnt exited safe mode yet..");
      }

      return null;
    }
  });
}