org.apache.hadoop.hdfs.MiniDFSCluster Java Examples

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster. Each example is taken from an open source project; the source file, project, and license are noted above the code.
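Before the project-specific examples, a minimal sketch of the typical MiniDFSCluster lifecycle in a test may help orient the reader. The class name, base directory, and path below are illustrative only; the MiniDFSCluster calls themselves (Builder, waitActive, getFileSystem, shutdown) are the same ones used throughout the examples that follow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Keep the cluster's storage under a known directory so it is easy to clean up afterwards.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/test/data");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();                     // wait until the NameNode and DataNodes are up
      FileSystem fs = cluster.getFileSystem();  // FileSystem backed by the mini cluster
      fs.mkdirs(new Path("/tmp/example"));
      System.out.println("Mini HDFS is at " + fs.getUri());
    } finally {
      cluster.shutdown();                       // always shut the cluster down, even on failure
    }
  }
}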
Example #1
Source File: TestRecovery.java    From tez with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  LOG.info("Starting mini clusters");
  try {
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
        .format(true).racks(null).build();
    remoteFs = dfsCluster.getFileSystem();
  } catch (IOException io) {
    throw new RuntimeException("problem starting mini dfs cluster", io);
  }
  if (miniTezCluster == null) {
    miniTezCluster = new MiniTezCluster(TestRecovery.class.getName(), 1, 1, 1);
    Configuration miniTezconf = new Configuration(conf);
    miniTezconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 4);
    miniTezconf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
    miniTezconf.setLong(TezConfiguration.TEZ_AM_SLEEP_TIME_BEFORE_EXIT_MILLIS, 500);
    miniTezCluster.init(miniTezconf);
    miniTezCluster.start();
  }
}
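A @BeforeClass setup like this is normally paired with an @AfterClass teardown that stops both mini clusters. The following is a minimal sketch assuming the same static fields; it is not part of the original TestRecovery excerpt.

@AfterClass
public static void afterClass() throws Exception {
  if (miniTezCluster != null) {
    miniTezCluster.stop();   // MiniTezCluster is a YARN service; stop() tears it down
    miniTezCluster = null;
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();   // releases the NameNode and DataNodes started above
    dfsCluster = null;
  }
}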
 
Example #2
Source File: TestStartup.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");
  
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Example #3
Source File: TestJobSysDirWithDFS.java    From hadoop with Apache License 2.0
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;

    JobConf conf = new JobConf();
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);

    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
 
Example #4
Source File: TestMountd.java    From hadoop with Apache License 2.0
@Test
public void testStart() throws IOException {
  // Start minicluster
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  cluster.waitActive();
  
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Example #5
Source File: TestWriteToReplica.java    From hadoop with Apache License 2.0
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
 
Example #6
Source File: BaseTestMiniDFS.java    From dremio-oss with Apache License 2.0
/**
 * Start a MiniDFS-cluster-backed SabotNode cluster.
 * @param testClass name of the test class, used as the prefix of the MiniDFS storage directory
 * @param configuration HDFS configuration used to start the cluster
 * @throws Exception
 */
protected static void startMiniDfsCluster(final String testClass, Configuration configuration) throws Exception {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(testClass), "Expected a non-null and non-empty test class name");
  dfsConf = Preconditions.checkNotNull(configuration);

  // Set the MiniDfs base dir to the test's temp directory, so that all files created within the MiniDfs
  // are properly cleaned up when the test exits.
  miniDfsStoragePath = Files.createTempDirectory(testClass).toString();
  dfsConf.set("hdfs.minidfs.basedir", miniDfsStoragePath);
  // HDFS-8880 and HDFS-8953 introduce metrics logging that requires log4j, but log4j is explicitly
  // excluded in build. So disable logging to avoid NoClassDefFoundError for Log4JLogger.
  dfsConf.set("dfs.namenode.metrics.logger.period.seconds", "0");
  dfsConf.set("dfs.datanode.metrics.logger.period.seconds", "0");

  // Start the MiniDfs cluster
  dfsCluster = new MiniDFSCluster.Builder(dfsConf)
      .numDataNodes(3)
      .format(true)
      .build();

  fs = dfsCluster.getFileSystem();
}
 
Example #7
Source File: HATestUtil.java    From big-c with Apache License 2.0
/**
 * Wait for the datanodes in the cluster to process any block
 * deletions that have already been asynchronously queued.
 */
public static void waitForDNDeletions(final MiniDFSCluster cluster)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      for (DataNode dn : cluster.getDataNodes()) {
        if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
          return false;
        }
      }
      return true;
    }
  }, 1000, 10000);
}
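A hypothetical call site for this helper, assuming a test already holds a running MiniDFSCluster named cluster and a FileSystem named fs (the path is illustrative):

// Deleting files only queues block deletions; DataNodes process them asynchronously.
fs.delete(new Path("/test/dir"), true);
// Block until every DataNode has drained its pending-deletion queue,
// so later assertions about block counts or disk usage are stable.
HATestUtil.waitForDNDeletions(cluster);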
 
Example #8
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example #9
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();
  
  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
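As a hedged follow-up, a test using this helper might exercise a failover with the returned cluster; transitionToStandby and transitionToActive are MiniDFSCluster methods, while the ports and assertion below are illustrative:

MiniDFSCluster cluster = initMiniHACluster(9000, 9001);  // ports are illustrative
try {
  // nn1 was made active inside the helper; fail over to nn2.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  assertFalse(cluster.getNameNode(1).isStandbyState());
} finally {
  cluster.shutdown();
}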
 
Example #10
Source File: TestWriteToReplica.java    From hadoop with Apache License 2.0
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test append
    testAppend(bpid, dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
 
Example #11
Source File: SparkMiniCluster.java    From spork with Apache License 2.0
@Override
protected void setupMiniDfsAndMrClusters() {
    try {
        CONF_DIR.mkdirs();
        if (CONF_FILE.exists()) {
            CONF_FILE.delete();
        }
        m_conf = new Configuration();
        m_conf.set("io.sort.mb", "1");
        m_conf.writeXml(new FileOutputStream(CONF_FILE));
        int dataNodes = 4;
        m_dfs = new MiniDFSCluster(m_conf, dataNodes, true, null);
        m_fileSys = m_dfs.getFileSystem();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Example #12
Source File: TestFsck.java    From hadoop with Apache License 2.0
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #13
Source File: TestCacheAdminCLI.java    From hadoop with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Example #14
Source File: TestNameNodeRpcServer.java    From hadoop with Apache License 2.0
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();

  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
 
Example #15
Source File: TestHadoopNNAWithStreamEngine.java    From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  GSetGenerator gSetGenerator = new GSetGenerator();
  gSetGenerator.clear();
  GSet<INode, INodeWithAdditionalFields> gset = gSetGenerator.getGSet((short) 3, 10, 500);
  nna = new HadoopWebServerMain();
  ApplicationConfiguration conf = new ApplicationConfiguration();
  conf.set("ldap.enable", "false");
  conf.set("authorization.enable", "false");
  conf.set("nna.historical", "false");
  conf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  conf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  conf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(conf, gset);
  hostPort = new HttpHost("localhost", 4567);
}
 
Example #16
Source File: TestDataNodeVolumeFailureToleration.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
 
Example #17
Source File: TestBackupNode.java    From big-c with Apache License 2.0
void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
  long thisCheckpointTxId;
  do {
    try {
      LOG.info("Waiting checkpoint to complete... " +
          "checkpoint txid should increase above " + txid);
      Thread.sleep(1000);
    } catch (Exception e) {}
    // The checkpoint is not done until the nn has received it from the bn
    thisCheckpointTxId = cluster.getNameNode().getFSImage().getStorage()
      .getMostRecentCheckpointTxId();
  } while (thisCheckpointTxId < txid);
  // Check that the checkpoint got uploaded to NN successfully
  FSImageTestUtil.assertNNHasCheckpoints(cluster,
      Collections.singletonList((int)thisCheckpointTxId));
}
 
Example #18
Source File: BucketingSinkTest.java    From flink with Apache License 2.0
@BeforeClass
public static void createHDFS() throws IOException {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example #19
Source File: TestDataNodeMetrics.java    From hadoop with Apache License 2.0
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes, 1 for the flush that occurs after writing, 
    // 1 that occurs on closing the data and metadata files.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs, one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example #20
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #21
Source File: TestHDFSFileContextMainOperations.java    From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  URI uri0 = cluster.getURI(0);
  fc = FileContext.getFileContext(uri0, CONF);
  defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
 
Example #22
Source File: TestJobHistoryEventHandler.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setUpClass() throws Exception {
  coreSitePath = "." + File.separator + "target" + File.separator +
          "test-classes" + File.separator + "core-site.xml";
  Configuration conf = new HdfsConfiguration();
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
}
 
Example #23
Source File: TestDatamerge.java    From hadoop-gpu with Apache License 2.0
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestDatamerge.class)) {
    protected void setUp() throws Exception {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
    }
    protected void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  };
  return setup;
}
 
Example #24
Source File: TestDFSIsUnderConstruction.java    From RDFS with Apache License 2.0
@Override
protected void setUp() throws Exception {
  super.setUp();
  conf = new Configuration();
  cluster = new MiniDFSCluster(conf, 2, true, new String[]{"/rack1", "/rack2"});
  cluster.waitClusterUp();
  fs = cluster.getFileSystem();
}
 
Example #25
Source File: TestEncryptedShuffle.java    From hadoop with Apache License 2.0
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Example #26
Source File: TestBlockUnderConstruction.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
}
 
Example #27
Source File: TestHadoopNNAAuthorization.java    From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  GSetGenerator gSetGenerator = new GSetGenerator();
  gSetGenerator.clear();
  GSet<INode, INodeWithAdditionalFields> gset = gSetGenerator.getGSet((short) 3, 10, 500);
  nna = new HadoopWebServerMain();
  ApplicationConfiguration conf = new ApplicationConfiguration();
  conf.set("ldap.enable", "false");
  conf.set("authorization.enable", "true");
  conf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  conf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  nna.init(conf, gset);
  hostPort = new HttpHost("localhost", 4567);
}
 
Example #28
Source File: TestCLI.java    From hadoop-gpu with Apache License 2.0
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  
   // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
 
Example #29
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .format(true).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
}
 
Example #30
Source File: TestFSRMStateStore.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testFSRMStateStore() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    fsTester = new TestFSRMStateStoreTester(cluster, false);
    // If the state store is FileSystemRMStateStore then add corrupted entry.
    // It should discard the entry and remove it from file system.
    FSDataOutputStream fsOut = null;
    FileSystemRMStateStore fileSystemRMStateStore =
            (FileSystemRMStateStore) fsTester.getRMStateStore();
    String appAttemptIdStr3 = "appattempt_1352994193343_0001_000003";
    ApplicationAttemptId attemptId3 =
            ConverterUtils.toApplicationAttemptId(appAttemptIdStr3);
    Path appDir =
            fsTester.store.getAppDir(attemptId3.getApplicationId().toString());
    Path tempAppAttemptFile =
            new Path(appDir, attemptId3.toString() + ".tmp");
    fsOut = fileSystemRMStateStore.fs.create(tempAppAttemptFile, false);
    fsOut.write("Some random data ".getBytes());
    fsOut.close();

    testRMAppStateStore(fsTester);
    Assert.assertFalse(fsTester.workingDirPathURI
            .getFileSystem(conf).exists(tempAppAttemptFile));
    testRMDTSecretManagerStateStore(fsTester);
    testCheckVersion(fsTester);
    testEpoch(fsTester);
    testAppDeletion(fsTester);
    testDeleteStore(fsTester);
    testAMRMTokenSecretManagerStateStore(fsTester);
  } finally {
    cluster.shutdown();
  }
}