Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#Builder

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#Builder. Each example names the source file, the open-source project it was taken from, and that project's license.
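Nearly every snippet below follows the same three-step pattern: point MiniDFSCluster.HDFS_MINIDFS_BASEDIR at a scratch directory, build the cluster from a Builder, then derive a FileSystem or URI from the running cluster. Here is a minimal, self-contained sketch of that setup, assuming JUnit 4 and the hadoop-hdfs test artifact on the classpath; the class and field names are illustrative, not taken from any of the projects below, and the matching teardown is sketched at the end of the page.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.BeforeClass;

public class MiniDFSClusterPatternSketch {

  private static MiniDFSCluster hdfsCluster;

  @BeforeClass
  public static void startCluster() throws Exception {
    Configuration conf = new Configuration();
    // Home the NameNode/DataNode storage under target/ so repeated
    // runs start clean and the build's clean step removes the files.
    File baseDir = new File("./target/hdfs/pattern-sketch").getAbsoluteFile();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    // Builder defaults: one NameNode on a free ephemeral port, one DataNode.
    hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    hdfsCluster.waitActive();

    // A running cluster hands back a ready-to-use FileSystem.
    FileSystem fs = hdfsCluster.getFileSystem();
    fs.mkdirs(new Path("/test"));
  }
}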
Example 1
Source File: RollingSinkITCase.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {

	LOG.info("In RollingSinkITCase: Starting MiniDFSCluster ");

	dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	hdfsURI = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/";

	miniClusterResource = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(4)
			.build());

	miniClusterResource.before();
}
 
Example 2
Source File: BucketingSinkFaultToleranceITCase.java    From flink with Apache License 2.0
@Before
public void createHDFS() throws IOException {
	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 3
Source File: TajoTestingCluster.java    From tajo with Apache License 2.0
/**
 * Start a MiniDFSCluster. Only one may be running at a time.
 * @param servers How many DataNodes to start.
 * @param dir Where to home your DFS cluster.
 * @param hosts Hostnames for the DataNodes to run on.
 * @return The MiniDFSCluster that was created.
 * @throws java.io.IOException
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers,
                                          File dir,
                                          final String hosts[])
    throws IOException {

  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.getAbsolutePath());
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
  builder.hosts(hosts);
  builder.numDataNodes(servers);
  builder.format(true);
  builder.waitSafeMode(true);
  this.dfsCluster = builder.build();

  // Set this just-started cluster as our filesystem.
  this.defaultFS = this.dfsCluster.getFileSystem();
  this.conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString());
  this.conf.setVar(TajoConf.ConfVars.ROOT_DIR, defaultFS.getUri() + "/tajo");
  isDFSRunning = true;
  return this.dfsCluster;
}
 
Example 4
Source File: HDFSFileStreamSourceTest.java    From incubator-samoa with Apache License 2.0
@Before
public void setUp() throws Exception {
  // Start MiniDFSCluster
  config = new Configuration();
  config.set("hdfs.minidfs.basedir", "target/build/test/data/dfs");
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(config).hosts(HOSTS).numDataNodes(1)
      .format(true);
  hdfsCluster = builder.build();
  hdfsCluster.waitActive();
  hdfsURI = "hdfs://localhost:" + hdfsCluster.getNameNodePort();

  // Construct stream source
  streamSource = new HDFSFileStreamSource();

  // General config
  config.set("fs.defaultFS", hdfsURI);
}
 
Example 5
Source File: TestBinaryTokenFile.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 
  
  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
 
Example 6
Source File: FileSystemFunctionsTest.java    From metron with Apache License 2.0
@BeforeAll
public static void setupFS() throws IOException {
  {
    hdfsBaseDir = Files.createTempDirectory("test_hdfs").toFile().getAbsoluteFile();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBaseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    hdfsPrefix = "/";
  }
  {
    localPrefix = "target/fsTest/";
    if (new File(localPrefix).exists()) {
      new File(localPrefix).delete();
    }
    new File(localPrefix).mkdirs();
  }
}
 
Example 7
Source File: HDFSTest.java    From Flink-CEPplus with Apache License 2.0
@Before
public void createHDFS() {
	try {
		Configuration hdConf = new Configuration();

		File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
		FileUtil.fullyDelete(baseDir);
		hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
		hdfsCluster = builder.build();

		hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

		hdPath = new org.apache.hadoop.fs.Path("/test");
		hdfs = hdPath.getFileSystem(hdConf);
		FSDataOutputStream stream = hdfs.create(hdPath);
		for (int i = 0; i < 10; i++) {
			stream.write("Hello HDFS\n".getBytes(ConfigConstants.DEFAULT_CHARSET));
		}
		stream.close();

	} catch (Throwable e) {
		e.printStackTrace();
		Assert.fail("Test failed " + e.getMessage());
	}
}
 
Example 8
Source File: DistributedCacheDfsTest.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
	File dataDir = TEMP_FOLDER.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	String hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";

	FileSystem dfs = FileSystem.get(new URI(hdfsURI));
	testFile = writeFile(dfs, dfs.getHomeDirectory(), "testFile");

	testDir = new Path(dfs.getHomeDirectory(), "testDir");
	dfs.mkdirs(testDir);
	writeFile(dfs, testDir, "testFile1");
	writeFile(dfs, testDir, "testFile2");
}
 
Example 9
Source File: RollingSinkFaultToleranceITCase.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void createHDFS() throws IOException {
	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 10
Source File: TestDocUseCases.java    From jsr203-hadoop with Apache License 2.0
private static MiniDFSCluster startMini(String testName) throws IOException {
	File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
	FileUtil.fullyDelete(baseDir);
	Configuration conf = new Configuration();
	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
	hdfsCluster.waitActive();
	return hdfsCluster;
}
 
Example 11
Source File: CreateHDFSStoreTest.java    From gemfirexd-oss with Apache License 2.0
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
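Example 11 pins the NameNode to a caller-supplied port, which can fail with a BindException when tests run concurrently. To my knowledge the Builder treats a port of 0 (its default) as a request for a free ephemeral port, which the test can read back afterwards; a hedged variant of the snippet above, reusing its hconf and numDN names:

Builder builder = new MiniDFSCluster.Builder(hconf);
builder.numDataNodes(numDN);
builder.nameNodePort(0);                    // 0 (the default) = any free ephemeral port
MiniDFSCluster cluster = builder.build();
int boundPort = cluster.getNameNodePort();  // discover the port actually chosen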
 
Example 12
Source File: YarnFileStageTest.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = CLASS_TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}
 
Example 13
Source File: TestBlockScanner.java    From big-c with Apache License 2.0
TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).
      storagesPerDatanode(1);
  if (numNameServices > 1) {
    bld.nnTopology(MiniDFSNNTopology.
          simpleFederatedTopology(numNameServices));
  }
  cluster = bld.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    dfs[i] = cluster.getFileSystem(i);
  }
  bpids = new String[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int i = 0; i < numNameServices; i++) {
    dfs[i].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}
 
Example 14
Source File: FileStoreTestBase.java    From pulsar with Apache License 2.0
@BeforeMethod
public void start() throws Exception {
    File baseDir = Files.createTempDirectory(basePath).toFile().getAbsoluteFile();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();

    hdfsURI = "hdfs://localhost:" + hdfsCluster.getNameNodePort() + "/";
    Properties properties = new Properties();
    fileSystemManagedLedgerOffloader = new FileSystemManagedLedgerOffloader(
            OffloadPolicies.create(properties),
            scheduler, hdfsURI, basePath);
}
 
Example 15
Source File: HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest.java    From flink with Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	final File baseDir = TEMP_FOLDER.newFolder();

	final Configuration hdConf = new Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

	final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();

	final org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();

	fileSystem = new HadoopFileSystem(hdfs);
	basePath = new Path(hdfs.getUri() + "/tests");
}
 
Example 16
Source File: TestPath.java    From jsr203-hadoop with Apache License 2.0
private static MiniDFSCluster startMini(String testName) throws IOException {
  File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  Configuration conf = new Configuration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
  hdfsCluster.waitActive();
  return hdfsCluster;
}
 
Example 17
Source File: TestBlockScanner.java    From hadoop with Apache License 2.0
TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).
      storagesPerDatanode(1);
  if (numNameServices > 1) {
    bld.nnTopology(MiniDFSNNTopology.
          simpleFederatedTopology(numNameServices));
  }
  cluster = bld.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    dfs[i] = cluster.getFileSystem(i);
  }
  bpids = new String[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int i = 0; i < numNameServices; i++) {
    dfs[i].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}
 
Example 18
Source File: YarnPreConfiguredMasterHaServicesTest.java    From flink with Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}
 
Example 19
Source File: TestBalancerWithNodeGroup.java    From hadoop with Apache License 2.0
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test node-group locality for balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};
  
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 20% full
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / (numOfDatanodes/2),
        (short) (numOfDatanodes/2), 0);
    
    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on NODEGROUP2
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);

  } finally {
    cluster.shutdown();
  }
}
 
Example 20
Source File: MiniQJMHACluster.java    From big-c with Apache License 2.0
public MiniDFSCluster.Builder getDfsBuilder() {
  return dfsBuilder;
}