org.apache.hadoop.util.VersionInfo Java Examples

The following examples show how to use org.apache.hadoop.util.VersionInfo. Each example notes its source file, the project it was taken from, and that project's license.
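As a quick orientation before the project examples, here is a minimal standalone sketch (the class name PrintHadoopVersion is illustrative only, not taken from any project below) that prints the same build metadata the examples read from VersionInfo:

import org.apache.hadoop.util.VersionInfo;

public class PrintHadoopVersion {
  public static void main(String[] args) {
    // Version of the hadoop-common library on the classpath, e.g. "2.7.3"
    System.out.println("Hadoop version:       " + VersionInfo.getVersion());
    // Full build version string (version plus additional build details)
    System.out.println("Hadoop build version: " + VersionInfo.getBuildVersion());
    // Date on which the hadoop-common artifact was built
    System.out.println("Built on:             " + VersionInfo.getDate());
  }
}
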
Example #1
Source File: DistributedCacheUtilImpl.java    From pentaho-hadoop-shims with Apache License 2.0
/**
 * Add a file path to the current set of classpath entries. It adds the file to the cache as well.
 * <p/>
 * This is copied from Hadoop 0.20.2 o.a.h.filecache.DistributedCache so we can inject the correct path separator for
 * the environment the cluster is executing in. See {@link #getClusterPathSeparator()}.
 *
 * @param file Path of the file to be added
 * @param conf Configuration that contains the classpath setting
 */
public void addFileToClassPath( Path file, Configuration conf )
  throws IOException {

  // Save off the classloader, to make sure the version info can be loaded successfully from the hadoop-common JAR
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  Thread.currentThread().setContextClassLoader( VersionInfo.class.getClassLoader() );

  String classpath = conf.get( "mapred.job.classpath.files" );
  conf.set( "mapred.job.classpath.files", classpath == null ? file.toString()
    : classpath + getClusterPathSeparator() + file.toString() );
  FileSystem fs = FileSystem.get( conf );
  URI uri = fs.makeQualified( file ).toUri();

  org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheFile( uri, conf );

  // Restore the original classloader once the file has been added to the cache
  Thread.currentThread().setContextClassLoader( cl );
}
 
Example #2
Source File: ListPathsServlet.java    From hadoop with Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example #3
Source File: TestDatanodeRegister.java    From big-c with Apache License 2.0
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example #4
Source File: CompactionCombineFileInputFormat.java    From incubator-gobblin with Apache License 2.0
/**
 * Set the number of locations in the split to SPLIT_MAX_NUM_LOCATIONS if it is larger than
 * SPLIT_MAX_NUM_LOCATIONS (MAPREDUCE-5186).
 */
private static List<InputSplit> cleanSplits(List<InputSplit> splits) throws IOException {
  if (VersionInfo.getVersion().compareTo("2.3.0") >= 0) {
    // This issue was fixed in Hadoop 2.3.0; on newer versions there is no need to clean up the splits.
    return splits;
  }

  List<InputSplit> cleanedSplits = Lists.newArrayList();

  for (int i = 0; i < splits.size(); i++) {
    CombineFileSplit oldSplit = (CombineFileSplit) splits.get(i);
    String[] locations = oldSplit.getLocations();

    Preconditions.checkNotNull(locations, "CombineFileSplit.getLocations() returned null");

    if (locations.length > SPLIT_MAX_NUM_LOCATIONS) {
      locations = Arrays.copyOf(locations, SPLIT_MAX_NUM_LOCATIONS);
    }

    cleanedSplits.add(new CombineFileSplit(oldSplit.getPaths(), oldSplit.getStartOffsets(), oldSplit.getLengths(),
        locations));
  }
  return cleanedSplits;
}
 
Example #5
Source File: HadoopRecoverableWriter.java    From flink with Apache License 2.0
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems
	if (!"hdfs".equalsIgnoreCase(fs.getScheme())) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS");
	}

	// Part of the functionality depends on specific versions. We check these schemes and versions eagerly for
	// better error messages.
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		LOG.warn("WARNING: You are running on hadoop version " + VersionInfo.getVersion() + "." +
				" If your RollingPolicy does not roll on every checkpoint/savepoint, the StreamingFileSink will throw an exception upon recovery.");
	}
}
 
Example #6
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example #7
Source File: SparkShims.java    From zeppelin with Apache License 2.0
protected void buildSparkJobUrl(String master,
                                String sparkWebUrl,
                                int jobId,
                                Properties jobProperties,
                                InterpreterContext context) {
  String jobUrl = sparkWebUrl + "/jobs/job?id=" + jobId;
  String version = VersionInfo.getVersion();
  if (master.toLowerCase().contains("yarn") && !supportYarn6615(version)) {
    jobUrl = sparkWebUrl + "/jobs";
  }
  String jobGroupId = jobProperties.getProperty("spark.jobGroup.id");

  Map<String, String> infos = new java.util.HashMap<String, String>();
  infos.put("jobUrl", jobUrl);
  infos.put("label", "SPARK JOB");
  infos.put("tooltip", "View in Spark web UI");
  infos.put("noteId", getNoteId(jobGroupId));
  infos.put("paraId", getParagraphId(jobGroupId));
  LOGGER.debug("Send spark job url: " + infos);
  context.getIntpEventClient().onParaInfosReceived(infos);
}
 
Example #8
Source File: HadoopRecoverableFsDataOutputStream.java    From flink with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
	}

	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	return false;
}
 
Example #9
Source File: TestDatanodeRegister.java    From big-c with Apache License 2.0
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example #10
Source File: ClusterInfo.java    From hadoop with Apache License 2.0
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example #11
Source File: NodeInfo.java    From big-c with Apache License 2.0
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 
Example #12
Source File: BPServiceActor.java    From big-c with Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example #13
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example #14
Source File: FSDataset.java    From RDFS with Apache License 2.0
/**
 * Register the FSDataset MBean using the name
 *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
 */
void registerMBean(final String storageId) {
  // We wrap to bypass the standard MBean naming convention.
  // This wrapping can be removed in Java 6, which is more flexible in
  // package naming for MBeans and their implementations.
  StandardMBean bean;
  String storageName;
  if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage
    storageName = "UndefinedStorageId" + rand.nextInt();
  } else {
    storageName = storageId;
  }
  try {
    bean = new StandardMBean(this,FSDatasetMBean.class);
    mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
    versionBeanName = VersionInfo.registerJMX("DataNode");
  } catch (NotCompliantMBeanException e) {
    e.printStackTrace();
  }

  DataNode.LOG.info("Registered FSDatasetStatusMBean");
}
 
Example #15
Source File: ListPathsServlet.java    From RDFS with Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example #16
Source File: ListPathsServlet.java    From big-c with Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example #17
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example #18
Source File: TestDatanodeRegister.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example #19
Source File: TestDatanodeRegister.java    From hadoop with Apache License 2.0
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example #20
Source File: ListPathsServlet.java    From hadoop-gpu with Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example #21
Source File: ShimLoader.java    From dremio-oss with Apache License 2.0
/**
 * Return the "major" version of Hadoop currently on the classpath.
 * Hadoop 2.x and 3.x releases are both mapped to the "0.23" shim
 * series; any other major version is rejected.
 */
public static String getMajorVersion() {
  String vers = VersionInfo.getVersion();

  String[] parts = vers.split("\\.");
  if (parts.length < 2) {
    throw new RuntimeException("Illegal Hadoop Version: " + vers +
        " (expected A.B.* format)");
  }

  switch (Integer.parseInt(parts[0])) {
  case 2:
  case 3:
    return HADOOP23VERSIONNAME;
  default:
    throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
  }
}
 
Example #22
Source File: ClusterInfo.java    From big-c with Apache License 2.0
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example #23
Source File: MiniCluster.java    From incubator-retired-blur with Apache License 2.0
public boolean useYarn() {
  String version = VersionInfo.getVersion();
  if (version.startsWith("0.20.") || version.startsWith("1.")) {
    return false;
  }
  // Check for mr1 hadoop2
  if (isMr1Hadoop2()) {
    return false;
  }
  return true;
}
 
Example #24
Source File: YarnPrioritySchedulingITCase.java    From flink with Apache License 2.0
@BeforeClass
public static void setup() {
	assumeTrue(
		"Priority scheduling is not supported by Hadoop: " + VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 8));

	YARN_CONFIGURATION.setStrings("yarn.cluster.max-application-priority", "10");
	startYARNWithConfig(YARN_CONFIGURATION);
}
 
Example #25
Source File: YarnTypes.java    From reef with Apache License 2.0
public static boolean isAtOrAfterVersion(final String version) {
  final String hadoopVersion = VersionInfo.getVersion();

  if (hadoopVersion == null || hadoopVersion.length() < version.length()) {
    throw new RuntimeException("unsupported or incomplete hadoop version number provided for comparison: " +
        hadoopVersion);
  }

  return hadoopVersion.substring(0, version.length()).compareTo(version) >= 0;
}
 
Example #26
Source File: SparkShimsTest.java    From zeppelin with Apache License 2.0
@Test
public void runUnderYarnTest() {
  Properties properties = new Properties();
  properties.setProperty("spark.jobGroup.id", "zeppelin|user1|noteId|paragraphId");
  sparkShims.buildSparkJobUrl("yarn", "http://sparkurl", 0, properties, mockContext);

  Map<String, String> mapValue = argumentCaptor.getValue();
  assertTrue(mapValue.keySet().contains("jobUrl"));

  if (sparkShims.supportYarn6615(VersionInfo.getVersion())) {
    assertTrue(mapValue.get("jobUrl").contains("/jobs/job?id="));
  } else {
    assertFalse(mapValue.get("jobUrl").contains("/jobs/job?id="));
  }
}
 
Example #27
Source File: RegisterApplicationMasterResponseReflectorTest.java    From flink with Apache License 2.0
@Test
public void testGetContainersFromPreviousAttemptsMethodReflectiveHadoop22() {
	assumeTrue(
		"Method getContainersFromPreviousAttempts is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 2));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	assertTrue(registerApplicationMasterResponseReflector.getGetContainersFromPreviousAttemptsMethod().isPresent());
}
 
Example #28
Source File: TestNMWebServices.java    From big-c with Apache License 2.0
public void verifyNodeInfoGeneric(String id, String healthReport,
    long totalVmemAllocatedContainersMB, long totalPmemAllocatedContainersMB,
    long totalVCoresAllocatedContainers,
    boolean vmemCheckEnabled, boolean pmemCheckEnabled,
    long lastNodeUpdateTime, Boolean nodeHealthy, String nodeHostName,
    String hadoopVersionBuiltOn, String hadoopBuildVersion,
    String hadoopVersion, String resourceManagerVersionBuiltOn,
    String resourceManagerBuildVersion, String resourceManagerVersion) {

  WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:8042", id);
  WebServicesTestUtils.checkStringMatch("healthReport", "Healthy",
      healthReport);
  assertEquals("totalVmemAllocatedContainersMB incorrect", 15872,
      totalVmemAllocatedContainersMB);
  assertEquals("totalPmemAllocatedContainersMB incorrect", 16384,
      totalPmemAllocatedContainersMB);
  assertEquals("totalVCoresAllocatedContainers incorrect", 4000,
      totalVCoresAllocatedContainers);
  assertEquals("vmemCheckEnabled incorrect",  true, vmemCheckEnabled);
  assertEquals("pmemCheckEnabled incorrect",  true, pmemCheckEnabled);
  assertTrue("lastNodeUpdateTime incorrect", lastNodeUpdateTime == nmContext
      .getNodeHealthStatus().getLastHealthReportTime());
  assertTrue("nodeHealthy isn't true", nodeHealthy);
  WebServicesTestUtils.checkStringMatch("nodeHostName", "testhost.foo.com",
      nodeHostName);

  WebServicesTestUtils.checkStringMatch("hadoopVersionBuiltOn",
      VersionInfo.getDate(), hadoopVersionBuiltOn);
  WebServicesTestUtils.checkStringEqual("hadoopBuildVersion",
      VersionInfo.getBuildVersion(), hadoopBuildVersion);
  WebServicesTestUtils.checkStringMatch("hadoopVersion",
      VersionInfo.getVersion(), hadoopVersion);

  WebServicesTestUtils.checkStringMatch("resourceManagerVersionBuiltOn",
      YarnVersionInfo.getDate(), resourceManagerVersionBuiltOn);
  WebServicesTestUtils.checkStringEqual("resourceManagerBuildVersion",
      YarnVersionInfo.getBuildVersion(), resourceManagerBuildVersion);
  WebServicesTestUtils.checkStringMatch("resourceManagerVersion",
      YarnVersionInfo.getVersion(), resourceManagerVersion);
}
 
Example #29
Source File: RegisterApplicationMasterResponseReflectorTest.java    From flink with Apache License 2.0
@Test
public void testGetSchedulerResourceTypesMethodReflectiveHadoop26() {
	assumeTrue(
		"Method getSchedulerResourceTypes is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 6));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	assertTrue(registerApplicationMasterResponseReflector.getGetSchedulerResourceTypesMethod().isPresent());
}
 
Example #30
Source File: TestHsWebServices.java    From big-c with Apache License 2.0
public void verifyHsInfoGeneric(String hadoopVersionBuiltOn,
    String hadoopBuildVersion, String hadoopVersion, long startedon) {
  WebServicesTestUtils.checkStringMatch("hadoopVersionBuiltOn",
      VersionInfo.getDate(), hadoopVersionBuiltOn);
  WebServicesTestUtils.checkStringEqual("hadoopBuildVersion",
      VersionInfo.getBuildVersion(), hadoopBuildVersion);
  WebServicesTestUtils.checkStringMatch("hadoopVersion",
      VersionInfo.getVersion(), hadoopVersion);
  assertEquals("startedOn doesn't match: ",
      JobHistoryServer.historyServerTimeStamp, startedon);
}