org.apache.hadoop.mapred.MiniMRClientClusterFactory Java Examples

The following examples show how to use org.apache.hadoop.mapred.MiniMRClientClusterFactory. They are taken from open-source projects; the source file, project, and license are noted above each example.
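As a quick orientation before the project examples, here is a minimal, hedged lifecycle sketch. The class name MiniClusterLifecycleSketch is hypothetical, and the sketch relies only on the factory's create(Class, int, Configuration) method and on MiniMRClientCluster's start(), getConfig() and stop():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Bring up an in-process MapReduce-on-YARN mini cluster with one NodeManager.
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(MiniClusterLifecycleSketch.class, 1, conf);
    cluster.start();
    // The returned configuration points at the mini cluster; hand it to job clients.
    Configuration clusterConf = cluster.getConfig();
    // ... submit MapReduce jobs against clusterConf here ...
    cluster.stop();
  }
}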
Example #1
Source File: TestMRCredentials.java    From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
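The test class also shuts these clusters down once the tests finish; a hedged sketch of such a tear-down, reusing the field names from the example above (the actual cleanup in TestMRCredentials may differ):

@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.stop();      // stop the MiniMRClientCluster created in setUp()
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown(); // stop the MiniDFSCluster and its DataNodes
  }
}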
 
Example #2
Source File: TestEncryptedShuffle.java    From hadoop with Apache License 2.0
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
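A hedged follow-up sketch (not part of TestEncryptedShuffle): once startCluster(conf) has run, a test can build its jobs from the mini cluster's configuration rather than from the original conf, for example:

JobConf jobConf = new JobConf(mrCluster.getConfig());
jobConf.setJobName("encrypted-shuffle-smoke-test"); // hypothetical job name
// ... configure input/output paths and mapper/reducer classes, then submit with
// JobClient.runJob(jobConf) or the org.apache.hadoop.mapreduce.Job API.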
 
Example #3
Source File: GridmixTestUtils.java    From hadoop with Apache License 2.0
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");

  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);

  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
 
Example #4
Source File: TestMRCredentials.java    From big-c with Apache License 2.0
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
 
Example #5
Source File: TestEncryptedShuffle.java    From big-c with Apache License 2.0
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Example #6
Source File: GridmixTestUtils.java    From big-c with Apache License 2.0
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");

  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);

  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
 
Example #7
Source File: YarnTestBinding.java    From mr4c with Apache License 2.0
private void startMrCluster() throws IOException {
	Configuration conf = new JobConf();
	FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri());
	conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
	conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
	String addr = MiniYARNCluster.getHostname() + ":0";
	conf.set(YarnConfiguration.RM_ADDRESS, addr);
	conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr);
	m_mrCluster = MiniMRClientClusterFactory.create(
		HadoopTestUtils.class,
		"MR4CTests",
		1, // num node managers
		conf
	);

	// make sure startup is finished
	for ( int i=0; i<60; i++ ) {
		String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
		if ( newAddr.equals(addr) ) {
			s_log.warn("MiniYARNCluster startup not complete");
			try {
				Thread.sleep(1000);
			} catch (InterruptedException ie) {
				throw new IOException(ie);
			}
		} else {
			s_log.info("MiniYARNCluster now available at {}", newAddr);
			return;
		}
	}
	throw new IOException("MiniYARNCluster taking too long to startup");

}
 
Example #8
Source File: TestLargeSort.java    From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
 
Example #9
Source File: MiniHadoopClusterManager.java    From hadoop with Apache License 2.0
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }

  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }

  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
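A hedged client-side sketch (not part of MiniHadoopClusterManager): another process can load the configuration file written via writeConfig and use it to reach the mini cluster. The file path below is hypothetical and stands for whatever path was passed as writeConfig:

Configuration clientConf = new Configuration(false);
clientConf.addResource(new Path("/tmp/minicluster-conf.xml")); // path passed as writeConfig
FileSystem fs = FileSystem.get(clientConf); // connects to the mini DFS named in the file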
 
Example #10
Source File: TestDistCh.java    From hadoop with Apache License 2.0
public void testDistCh() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  
  final FileSystem fs = cluster.getFileSystem();
  final FsShell shell = new FsShell(conf);
  
  try {
    final FileTree tree = new FileTree(fs, "testDistCh");
    final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);

    runLsr(shell, tree.root, 0);

    final String[] args = new String[NUN_SUBS];
    final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];

    
    args[0]="/test/testDistCh/sub0:sub1::";
    newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");

    args[1]="/test/testDistCh/sub1::sub2:";
    newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");

    args[2]="/test/testDistCh/sub2:::437";
    newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");

    args[3]="/test/testDistCh/sub3:sub1:sub2:447";
    newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
 
    args[4]="/test/testDistCh/sub4::sub5:437";
    newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");

    args[5]="/test/testDistCh/sub5:sub1:sub5:";
    newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");

    args[6]="/test/testDistCh/sub6:sub3::437";
    newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
    
    System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n  "));
    System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n  "));

    //run DistCh
    new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
    runLsr(shell, tree.root, 0);

    //check results
    for(int i = 0; i < NUN_SUBS; i++) {
      Path sub = new Path(tree.root + "/sub" + i);
      checkFileStatus(newstatus[i], fs.getFileStatus(sub));
      for(FileStatus status : fs.listStatus(sub)) {
        checkFileStatus(newstatus[i], status);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
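A hedged aside (not part of TestDistCh): each argument uses the form path:owner:group:permission, with empty fields left unchanged, so args[3] above requests roughly the change that the plain FileSystem API would express, applied over the whole subtree, as:

fs.setOwner(new Path("/test/testDistCh/sub3"), "sub1", "sub2");
fs.setPermission(new Path("/test/testDistCh/sub3"), new FsPermission((short) 0447));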
 
Example #11
Source File: TestLargeSort.java    From big-c with Apache License 2.0
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
 
Example #12
Source File: MiniHadoopClusterManager.java    From big-c with Apache License 2.0
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }

  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }

  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
 
Example #13
Source File: TestDistCh.java    From big-c with Apache License 2.0
public void testDistCh() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  
  final FileSystem fs = cluster.getFileSystem();
  final FsShell shell = new FsShell(conf);
  
  try {
    final FileTree tree = new FileTree(fs, "testDistCh");
    final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);

    runLsr(shell, tree.root, 0);

    final String[] args = new String[NUN_SUBS];
    final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];

    
    args[0]="/test/testDistCh/sub0:sub1::";
    newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");

    args[1]="/test/testDistCh/sub1::sub2:";
    newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");

    args[2]="/test/testDistCh/sub2:::437";
    newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");

    args[3]="/test/testDistCh/sub3:sub1:sub2:447";
    newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
 
    args[4]="/test/testDistCh/sub4::sub5:437";
    newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");

    args[5]="/test/testDistCh/sub5:sub1:sub5:";
    newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");

    args[6]="/test/testDistCh/sub6:sub3::437";
    newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
    
    System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n  "));
    System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n  "));

    //run DistCh
    new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
    runLsr(shell, tree.root, 0);

    //check results
    for(int i = 0; i < NUN_SUBS; i++) {
      Path sub = new Path(tree.root + "/sub" + i);
      checkFileStatus(newstatus[i], fs.getFileStatus(sub));
      for(FileStatus status : fs.listStatus(sub)) {
        checkFileStatus(newstatus[i], status);
      }
    }
  } finally {
    cluster.shutdown();
  }
}