Java Code Examples for org.apache.hadoop.conf.Configuration.writeXml()

The following are Java code examples showing how to use writeXml() of the org.apache.hadoop.conf.Configuration class. You can vote up the examples you find useful; votes help surface better examples in our system.
Example 1
Project: hadoop-oss   File: TestDeprecatedKeys.java   Source Code and License Vote up 6 votes
/**
 * Verifies that writing a Configuration as XML emits both a key that is
 * deprecated after being set and its replacement key.
 */
public void testReadWriteWithDeprecatedKeys() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("old.config.yet.to.be.deprecated", true);
  // Deprecate the key only after it has been set, so the write path must
  // translate it to the replacement key on the fly.
  Configuration.addDeprecation("old.config.yet.to.be.deprecated",
      new String[]{"new.conf.to.replace.deprecated.conf"});
  String fileContents;
  // try-with-resources closes the stream even if writeXml throws.
  try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    conf.writeXml(out);
    fileContents = out.toString();
  }
  // Both the deprecated key and its replacement must appear in the XML.
  assertTrue(fileContents.contains("old.config.yet.to.be.deprecated"));
  assertTrue(fileContents.contains("new.conf.to.replace.deprecated.conf"));
}
 
Example 2
Project: hadoop   File: TestJobHistoryEventHandler.java   Source Code and License Vote up 6 votes
/**
 * Checks that the per-user intermediate-done dir is resolved both as a
 * relative path and as a path fully qualified against the default FS.
 */
@Test
public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
  // Test relative path
  Configuration conf = new Configuration();
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
      "/mapred/history/done_intermediate");
  conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
  String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals("/mapred/history/done_intermediate/" +
      System.getProperty("user.name"), pathStr);

  // Test fully qualified path
  // Create default configuration pointing to the minicluster
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  // try-with-resources: the original leaked the stream if writeXml threw.
  try (FileOutputStream os = new FileOutputStream(coreSitePath)) {
    conf.writeXml(os);
  }
  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          "file:///");
  pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals(dfsCluster.getURI().toString() +
      "/mapred/history/done_intermediate/" + System.getProperty("user.name"),
      pathStr);
}
 
Example 3
Project: hadoop   File: TestDeprecatedKeys.java   Source Code and License Vote up 6 votes
/** Round-trips a Configuration through XML and checks deprecated-key output. */
public void testReadWriteWithDeprecatedKeys() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("old.config.yet.to.be.deprecated", true);
  Configuration.addDeprecation("old.config.yet.to.be.deprecated",
      new String[]{"new.conf.to.replace.deprecated.conf"});
  String xml;
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  try {
    conf.writeXml(buffer);
    xml = buffer.toString();
  } finally {
    buffer.close();
  }
  // The serialized XML must mention both the old and the replacement key.
  assertTrue(xml.contains("old.config.yet.to.be.deprecated"));
  assertTrue(xml.contains("new.conf.to.replace.deprecated.conf"));
}
 
Example 4
Project: angel   File: AngelYarnClient.java   Source Code and License Vote up 5 votes
/**
 * Writes the job configuration as XML to {@code jobFile} on the
 * JobTracker's file system with the standard job-file permissions.
 *
 * @param conf configuration to serialize
 * @param jobFile destination path for the job file
 * @throws IOException if the file cannot be created or written
 */
private void writeConf(Configuration conf, Path jobFile) throws IOException {
  // Write job file to JobTracker's fs; try-with-resources closes the
  // stream even when writeXml fails.
  try (FSDataOutputStream out = FileSystem.create(jtFs, jobFile,
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION))) {
    conf.writeXml(out);
  }
}
 
Example 5
Project: ditb   File: ConfServlet.java   Source Code and License Vote up 5 votes
/**
 * Guts of the servlet - extracted for easy testing.
 * Dispatches on the requested format: JSON dump, XML dump, or error.
 */
static void writeResponse(Configuration conf, Writer out, String format)
  throws IOException, BadFormatException {
  if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, out);
    return;
  }
  if (FORMAT_XML.equals(format)) {
    conf.writeXml(out);
    return;
  }
  throw new BadFormatException("Bad format: " + format);
}
 
Example 6
Project: circus-train   File: HiveConfFactoryTest.java   Source Code and License Vote up 5 votes
/** Writes a single key/value pair to {@code file} as Hadoop configuration XML. */
private static void writeConf(File file, String key, String value) throws IOException {
  Configuration xmlConf = new Configuration(false);
  xmlConf.set(key, value);
  try (FileWriter writer = new FileWriter(file)) {
    xmlConf.writeXml(writer);
  }
}
 
Example 7
Project: monarch   File: HDFSQuasiService.java   Source Code and License Vote up 5 votes
/**
 * Writes the HDFS configuration as XML to the given path. Failures are
 * logged, not propagated (best-effort).
 *
 * @param configuration configuration to serialize
 * @param hdfsConfOutPath destination file path
 */
private void writeConf(Configuration configuration, String hdfsConfOutPath) {
  // try-with-resources: the original never closed the FileWriter, leaking
  // the handle (and possibly unflushed data) on every call.
  try (FileWriter writer = new FileWriter(hdfsConfOutPath)) {
    configuration.writeXml(writer);
  } catch (IOException e) {
    // Include the exception as the cause instead of silently dropping it.
    LOG.error("Error in writing configuration at " + hdfsConfOutPath, e);
  }
}
 
Example 8
Project: hadoop   File: MRApp.java   Source Code and License Vote up 5 votes
/**
 * Submits a job to this MRApp: seeds user/staging settings, starts the app,
 * and writes the final job configuration out as job.xml.
 *
 * @param conf base job configuration (mutated with user/staging settings)
 * @param mapSpeculative whether map-side speculative execution is enabled
 * @param reduceSpeculative whether reduce-side speculative execution is enabled
 * @return the single Job registered in the app context
 */
public Job submit(Configuration conf, boolean mapSpeculative,
    boolean reduceSpeculative) throws Exception {
  String user = conf.get(MRJobConfig.USER_NAME, UserGroupInformation
      .getCurrentUser().getShortUserName());
  conf.set(MRJobConfig.USER_NAME, user);
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
  conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
  // TODO: fix the bug where the speculator gets events with
  // not-fully-constructed objects. For now, disable speculative exec
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);

  init(conf);
  start();
  DefaultMetricsSystem.shutdown();
  Job job = getContext().getAllJobs().values().iterator().next();
  if (assignedQueue != null) {
    job.setQueueName(assignedQueue);
  }

  // Write job.xml
  String jobFile = MRApps.getJobFile(conf, user,
      TypeConverter.fromYarn(job.getID()));
  LOG.info("Writing job conf to " + jobFile);
  new File(jobFile).getParentFile().mkdirs();
  // try-with-resources: the original leaked this FileOutputStream.
  try (FileOutputStream jobFileOut = new FileOutputStream(jobFile)) {
    conf.writeXml(jobFileOut);
  }

  return job;
}
 
Example 9
Project: hadoop   File: UtilsForTests.java   Source Code and License Vote up 5 votes
/**
 * Copies all properties from {@code confProps} into a Configuration and
 * writes it to {@code configFile} as Hadoop configuration XML.
 *
 * @param confProps properties to copy into the configuration
 * @param configFile destination file
 * @throws IOException if the file cannot be created or written
 */
static void setUpConfigFile(Properties confProps, File configFile)
    throws IOException {
  Configuration config = new Configuration(false);

  for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
    String key = (String) e.nextElement();
    config.set(key, confProps.getProperty(key));
  }

  // Open the stream only once the configuration is fully built, and let
  // try-with-resources close it even if writeXml throws (the original left
  // the stream open on failure).
  try (FileOutputStream fos = new FileOutputStream(configFile)) {
    config.writeXml(fos);
  }
}
 
Example 10
Project: hadoop   File: JobSubmitter.java   Source Code and License Vote up 5 votes
/**
 * Writes the job configuration as XML to {@code jobFile} on the
 * JobTracker's file system, using the standard job-file permissions.
 *
 * @param conf configuration to serialize
 * @param jobFile destination path
 * @throws IOException if the file cannot be created or written
 */
private void writeConf(Configuration conf, Path jobFile)
    throws IOException {
  // Write job file to JobTracker's fs; try-with-resources replaces the
  // manual try/finally and still closes on any exit path.
  try (FSDataOutputStream out = FileSystem.create(jtFs, jobFile,
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION))) {
    conf.writeXml(out);
  }
}
 
Example 11
Project: hadoop   File: TestHistoryFileManager.java   Source Code and License Vote up 5 votes
/**
 * Verifies history directories are created only on the default file system
 * (dfsCluster), not on the additional cluster (dfsCluster2).
 */
@Test
public void testCreateDirsWithAdditionalFileSystem() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  dfsCluster2.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
  Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());

  // Set default configuration to the first cluster
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          dfsCluster.getURI().toString());
  // try-with-resources: the original left the stream open if writeXml threw.
  try (FileOutputStream os = new FileOutputStream(coreSitePath)) {
    conf.writeXml(os);
  }

  testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0), true);

  // Directories should be created only in the default file system (dfsCluster)
  Assert.assertTrue(dfsCluster.getFileSystem()
          .exists(new Path(getDoneDirNameForTest())));
  Assert.assertTrue(dfsCluster.getFileSystem()
          .exists(new Path(getIntermediateDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
          .exists(new Path(getDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
          .exists(new Path(getIntermediateDoneDirNameForTest())));
}
 
Example 12
Project: ditb   File: KeyStoreTestUtil.java   Source Code and License Vote up 5 votes
/**
 * Saves configuration to a file.
 *
 * @param file File to save
 * @param conf Configuration contents to write to file
 * @throws IOException if there is an I/O error saving the file
 */
public static void saveConfig(File file, Configuration conf)
    throws IOException {
  // try-with-resources replaces the manual try/finally close.
  // NOTE(review): FileWriter uses the platform default charset while
  // writeXml emits an XML document — consider an explicit UTF-8 writer;
  // confirm expected encoding before changing.
  try (FileWriter writer = new FileWriter(file)) {
    conf.writeXml(writer);
  }
}
 
Example 13
Project: hadoop   File: TestFileSystemAccessService.java   Source Code and License Vote up 5 votes
/**
 * Verifies that FileSystemAccessService loads its Hadoop configuration from
 * a custom directory supplied via "server.hadoop.config.dir".
 */
@Test
@TestDir
public void serviceHadoopConfCustomDir() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
  new File(hadoopConfDir).mkdirs();
  String services = StringUtils.join(",",
    Arrays.asList(InstrumentationService.class.getName(),
                  SchedulerService.class.getName(),
                  FileSystemAccessService.class.getName()));
  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  conf.set("server.hadoop.config.dir", hadoopConfDir);

  // Write a marker property into hdfs-site.xml inside the custom conf dir.
  File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set("foo", "BAR");
  // try-with-resources: the original left the stream open if writeXml threw.
  try (OutputStream os = new FileOutputStream(hdfsSite)) {
    hadoopConf.writeXml(os);
  }

  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
  Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
  server.destroy();
}
 
Example 14
Project: hadoop   File: TestWriteConfigurationToDFS.java   Source Code and License Vote up 5 votes
/**
 * Regression test: writing a large configuration (a ~500KB value) as XML to
 * a file on the mini DFS cluster must succeed within the timeout.
 */
@Test(timeout=60000)
public void testWriteConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  System.out.println("Setting conf in: " + System.identityHashCode(conf));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  FileSystem fs = null;
  OutputStream os = null;
  try {
    fs = cluster.getFileSystem();
    Path filePath = new Path("/testWriteConf.xml");
    os = fs.create(filePath);
    StringBuilder longString = new StringBuilder();
    for (int i = 0; i < 100000; i++) {
      longString.append("hello");
    } // 500KB
    conf.set("foobar", longString.toString());
    conf.writeXml(os);
    os.close();
    // Null out after a successful close so the finally-block cleanup does
    // not close the same resource twice.
    os = null;
    fs.close();
    fs = null;
  } finally {
    // Best-effort close of anything still open, then tear down the cluster.
    IOUtils.cleanup(null, os, fs);
    cluster.shutdown();
  }
}
 
Example 15
Project: hadoop   File: CopyMapper.java   Source Code and License Vote up 5 votes
/**
 * Initialize SSL Config if same is set in conf
 *
 * <p>Loads the SSL client config shipped via the distributed cache, rewrites
 * the trust/key store locations to their localized cache paths, writes the
 * result to a local file under the job work dir, and points the job conf at
 * that file.</p>
 *
 * @throws IOException - If any
 */
private void initializeSSLConf(Context context) throws IOException {
  LOG.info("Initializing SSL configuration");

  String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
  Path[] cacheFiles = context.getLocalCacheFiles();

  Configuration sslConfig = new Configuration(false);
  String sslConfFileName = conf.get(DistCpConstants.CONF_LABEL_SSL_CONF);
  Path sslClient = findCacheFile(cacheFiles, sslConfFileName);
  if (sslClient == null) {
    // Best-effort: without the shipped config we just skip SSL setup.
    LOG.warn("SSL Client config file not found. Was looking for " + sslConfFileName +
        " in " + Arrays.toString(cacheFiles));
    return;
  }
  sslConfig.addResource(sslClient);

  // Rewrite the store locations to their localized cache paths.
  // NOTE(review): unlike sslClient above, these findCacheFile results are
  // not null-checked — a missing cache entry would NPE on toString();
  // confirm the stores are always shipped alongside the ssl conf.
  String trustStoreFile = conf.get("ssl.client.truststore.location");
  Path trustStorePath = findCacheFile(cacheFiles, trustStoreFile);
  sslConfig.set("ssl.client.truststore.location", trustStorePath.toString());

  String keyStoreFile = conf.get("ssl.client.keystore.location");
  Path keyStorePath = findCacheFile(cacheFiles, keyStoreFile);
  sslConfig.set("ssl.client.keystore.location", keyStorePath.toString());

  try {
    OutputStream out = new FileOutputStream(workDir + "/" + sslConfFileName);
    try {
      sslConfig.writeXml(out);
    } finally {
      out.close();
    }
    conf.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfFileName);
  } catch (IOException e) {
    // Deliberate best-effort: on failure, fall back to the classpath default.
    LOG.warn("Unable to write out the ssl configuration. " +
        "Will fall back to default ssl-client.xml in class path, if there is one", e);
  }
}
 
Example 16
Project: hadoop   File: KeyStoreTestUtil.java   Source Code and License Vote up 5 votes
/**
 * Serializes {@code conf} to {@code file} in Hadoop's configuration XML
 * format, closing the underlying writer on all paths.
 *
 * @param file destination file
 * @param conf configuration contents to write
 * @throws IOException if there is an I/O error saving the file
 */
public static void saveConfig(File file, Configuration conf)
    throws IOException {
  Writer out = new FileWriter(file);
  try {
    conf.writeXml(out);
  } finally {
    out.close();
  }
}
 
Example 17
Project: ditb   File: Constraints.java   Source Code and License Vote up 5 votes
/**
 * Write the configuration to a String
 *
 * @param conf
 *          to write
 * @return String representation of that configuration
 * @throws IOException
 */
private static String serializeConfiguration(Configuration conf)
    throws IOException {
  // Serialize the configuration into an in-memory buffer, then decode it.
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(buffer);
  conf.writeXml(dataOut);
  dataOut.flush();
  return Bytes.toString(buffer.toByteArray());
}
 
Example 18
Project: hadoop   File: TestNodeHealthService.java   Source Code and License Vote up 4 votes
/**
 * Exercises NodeHealthCheckerService through healthy, unhealthy, recovered,
 * and script-timeout transitions by stubbing the health-script runner.
 */
@Test
public void testNodeHealthService() throws Exception {
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  NodeHealthStatus healthStatus =
      factory.newRecordInstance(NodeHealthStatus.class);
  Configuration conf = getConfForNodeHealthScript();
  // try-with-resources: the original leaked this FileOutputStream.
  try (FileOutputStream os = new FileOutputStream(nodeHealthConfigFile)) {
    conf.writeXml(os);
  }
  conf.addResource(nodeHealthConfigFile.getName());
  writeNodeHealthScriptFile("", true);

  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  NodeHealthScriptRunner nodeHealthScriptRunner =
      spy(NodeManager.getNodeHealthScriptRunner(conf));
  NodeHealthCheckerService nodeHealthChecker = new NodeHealthCheckerService(
      nodeHealthScriptRunner, dirsHandler);
  nodeHealthChecker.init(conf);

  doReturn(true).when(nodeHealthScriptRunner).isHealthy();
  doReturn("").when(nodeHealthScriptRunner).getHealthReport();
  setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
      nodeHealthChecker.getHealthReport(),
      nodeHealthChecker.getLastHealthReportTime());
  LOG.info("Checking initial healthy condition");
  // Check proper report conditions.
  Assert.assertTrue("Node health status reported unhealthy", healthStatus
      .getIsNodeHealthy());
  Assert.assertTrue("Node health status reported unhealthy", healthStatus
      .getHealthReport().equals(nodeHealthChecker.getHealthReport()));

  doReturn(false).when(nodeHealthScriptRunner).isHealthy();
  // update health status
  setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
      nodeHealthChecker.getHealthReport(),
      nodeHealthChecker.getLastHealthReportTime());
  LOG.info("Checking Healthy--->Unhealthy");
  Assert.assertFalse("Node health status reported healthy", healthStatus
      .getIsNodeHealthy());
  Assert.assertTrue("Node health status reported healthy", healthStatus
      .getHealthReport().equals(nodeHealthChecker.getHealthReport()));

  doReturn(true).when(nodeHealthScriptRunner).isHealthy();
  setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
      nodeHealthChecker.getHealthReport(),
      nodeHealthChecker.getLastHealthReportTime());
  LOG.info("Checking UnHealthy--->healthy");
  // Check proper report conditions.
  Assert.assertTrue("Node health status reported unhealthy", healthStatus
      .getIsNodeHealthy());
  Assert.assertTrue("Node health status reported unhealthy", healthStatus
      .getHealthReport().equals(nodeHealthChecker.getHealthReport()));

  // Healthy to timeout transition.
  doReturn(false).when(nodeHealthScriptRunner).isHealthy();
  doReturn(NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG)
      .when(nodeHealthScriptRunner).getHealthReport();
  setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
      nodeHealthChecker.getHealthReport(),
      nodeHealthChecker.getLastHealthReportTime());
  LOG.info("Checking Healthy--->timeout");
  Assert.assertFalse("Node health status reported healthy even after timeout",
      healthStatus.getIsNodeHealthy());
  Assert.assertTrue("Node script time out message not propogated",
      healthStatus.getHealthReport().equals(
          NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG
          + NodeHealthCheckerService.SEPARATOR
          + nodeHealthChecker.getDiskHandler().getDisksHealthReport(false)));
}
 
Example 19
Project: hadoop   File: JobHistoryEventHandler.java   Source Code and License Vote up 4 votes
/**
 * Create an event writer for the Job represented by the jobID.
 * Writes out the job configuration to the log directory.
 * This should be the first call to history for a job
 *
 * @param jobId the jobId.
 * @param amStartedEvent the AM-started event carrying start/submit times
 * @throws IOException if the staging dir is missing or writes fail
 */
protected void setupEventWriter(JobId jobId, AMStartedEvent amStartedEvent)
    throws IOException {
  if (stagingDirPath == null) {
    LOG.error("Log Directory is null, returning");
    throw new IOException("Missing Log Directory for History");
  }

  MetaInfo oldFi = fileMap.get(jobId);
  Configuration conf = getConfig();

  // TODO Ideally this should be written out to the job dir
  // (.staging/jobid/files - RecoveryService will need to be patched)
  Path historyFile = JobHistoryUtils.getStagingJobHistoryFile(
      stagingDirPath, jobId, startCount);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  if (user == null) {
    throw new IOException(
        "User is null while setting up jobhistory eventwriter");
  }

  String jobName = context.getJob(jobId).getName();
  EventWriter writer = (oldFi == null) ? null : oldFi.writer;

  Path logDirConfPath =
      JobHistoryUtils.getStagingConfFile(stagingDirPath, jobId, startCount);
  if (writer == null) {
    try {
      writer = createEventWriter(historyFile);
      LOG.info("Event Writer setup for JobId: " + jobId + ", File: "
          + historyFile);
    } catch (IOException ioe) {
      LOG.info("Could not create log file: [" + historyFile + "] + for job "
          + "[" + jobName + "]");
      throw ioe;
    }

    //Write out conf only if the writer isn't already setup.
    if (conf != null) {
      // TODO Ideally this should be written out to the job dir
      // (.staging/jobid/files - RecoveryService will need to be patched)
      try {
        if (logDirConfPath != null) {
          // try-with-resources: the original never closed the stream when
          // writeXml threw, leaking the open file on the error path.
          try (FSDataOutputStream jobFileOut =
              stagingDirFS.create(logDirConfPath, true)) {
            conf.writeXml(jobFileOut);
          }
        }
      } catch (IOException e) {
        LOG.info("Failed to write the job configuration file", e);
        throw e;
      }
    }
  }

  String queueName = JobConf.DEFAULT_QUEUE_NAME;
  if (conf != null) {
    queueName = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME);
  }

  MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer,
      user, jobName, jobId, amStartedEvent.getForcedJobStateOnShutDown(),
      queueName);
  fi.getJobSummary().setJobId(jobId);
  fi.getJobSummary().setJobLaunchTime(amStartedEvent.getStartTime());
  fi.getJobSummary().setJobSubmitTime(amStartedEvent.getSubmitTime());
  fi.getJobIndexInfo().setJobStartTime(amStartedEvent.getStartTime());
  fi.getJobIndexInfo().setSubmitTime(amStartedEvent.getSubmitTime());
  fileMap.put(jobId, fi);
}
 
Example 20
Project: hadoop   File: TestJobHistoryEventHandler.java   Source Code and License Vote up 4 votes
/**
 * Verifies history files are written to the default FS recorded in
 * core-site.xml (the minicluster) even after fs.defaultFS is changed.
 */
@Test (timeout=50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          dfsCluster.getURI().toString());
  // try-with-resources: the original left the stream open if writeXml threw.
  try (FileOutputStream os = new FileOutputStream(coreSitePath)) {
    conf.writeXml(os);
  }

  // simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          "file:///");

  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);

  JHEvenHandlerForTest realJheh =
      new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);

  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
        t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));

    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
        new Counters(), new Counters())));

    // If we got here then event handler worked but we don't know with which
    // file system. Now we check that history stuff was written to minicluster
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
  }
}