Java Code Examples for org.apache.hadoop.util.VersionInfo

The following examples show how to use org.apache.hadoop.util.VersionInfo. They are extracted from open source projects; the originating project and source file are listed above each example.
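
Before diving into the project examples, here is a minimal orientation sketch of our own (not taken from any of the projects below). It prints the build metadata that hadoop-common compiles into VersionInfo:

import org.apache.hadoop.util.VersionInfo;

public class PrintHadoopVersion {
  public static void main(String[] args) {
    // Each value is baked into hadoop-common at build time.
    System.out.println("version:       " + VersionInfo.getVersion());
    System.out.println("build version: " + VersionInfo.getBuildVersion());
    System.out.println("built on:      " + VersionInfo.getDate());
    System.out.println("revision:      " + VersionInfo.getRevision());
    System.out.println("branch:        " + VersionInfo.getBranch());
  }
}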
Example 1
Source Project: hadoop   Source File: ClusterInfo.java    License: Apache License 2.0
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example 2
Source Project: hadoop   Source File: ListPathsServlet.java    License: Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 3
Source Project: hadoop   Source File: BPServiceActor.java    License: Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
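
Example 3 delegates the actual ordering to org.apache.hadoop.util.VersionUtil.compareVersions, which compares dot-separated version components numerically rather than lexicographically. A small sketch of ours illustrating the semantics the check above relies on:

import org.apache.hadoop.util.VersionUtil;

public class CompareVersionsDemo {
  public static void main(String[] args) {
    // Negative: 2.6.0 is older than a 2.7.0 minimum, so checkNNVersion would throw.
    System.out.println(VersionUtil.compareVersions("2.6.0", "2.7.0"));
    // Positive: components are compared numerically, so 2.10.0 sorts after
    // 2.3.0, where a plain String.compareTo would get it wrong.
    System.out.println(VersionUtil.compareVersions("2.10.0", "2.3.0"));
  }
}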
 
Example 4
Source Project: hadoop   Source File: NNThroughputBenchmark.java    License: Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 5
Source Project: hadoop   Source File: TestDatanodeRegister.java    License: Apache License 2.0
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example 6
Source Project: hadoop   Source File: TestDatanodeRegister.java    License: Apache License 2.0
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example 7
Source Project: dremio-oss   Source File: ShimLoader.java    License: Apache License 2.0
/**
 * Return the "major" version of Hadoop currently on the classpath.
 * Releases in the 2.x and 3.x series are both mapped to the "0.23"
 * shim version name (HADOOP23VERSIONNAME); any other major version
 * is rejected below.
 */
public static String getMajorVersion() {
  String vers = VersionInfo.getVersion();

  String[] parts = vers.split("\\.");
  if (parts.length < 2) {
    throw new RuntimeException("Illegal Hadoop Version: " + vers +
        " (expected A.B.* format)");
  }

  switch (Integer.parseInt(parts[0])) {
  case 2:
  case 3:
    return HADOOP23VERSIONNAME;
  default:
    throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
  }
}
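
For orientation, the mapping above behaves as follows at a hypothetical call site (assuming HADOOP23VERSIONNAME is the string "0.23", which this excerpt does not itself show):

// VersionInfo.getVersion() == "2.7.3"  -> "0.23"
// VersionInfo.getVersion() == "3.2.1"  -> "0.23"
// VersionInfo.getVersion() == "1.2.1"  -> IllegalArgumentException (unrecognized major version)
// VersionInfo.getVersion() == "2"      -> RuntimeException (not in A.B.* format)
String shimName = ShimLoader.getMajorVersion();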
 
Example 8
Source Project: big-c   Source File: NodeInfo.java    License: Apache License 2.0
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 
Example 9
Source Project: big-c   Source File: ClusterInfo.java    License: Apache License 2.0
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example 10
Source Project: big-c   Source File: ListPathsServlet.java    License: Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 11
Source Project: big-c   Source File: BPServiceActor.java    License: Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example 12
Source Project: big-c   Source File: NNThroughputBenchmark.java    License: Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 13
Source Project: big-c   Source File: TestDatanodeRegister.java    License: Apache License 2.0
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example 14
Source Project: big-c   Source File: TestDatanodeRegister.java    License: Apache License 2.0
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example 15
Source Project: zeppelin   Source File: SparkShims.java    License: Apache License 2.0
protected void buildSparkJobUrl(String master,
                                String sparkWebUrl,
                                int jobId,
                                Properties jobProperties,
                                InterpreterContext context) {
  String jobUrl = sparkWebUrl + "/jobs/job?id=" + jobId;
  String version = VersionInfo.getVersion();
  if (master.toLowerCase().contains("yarn") && !supportYarn6615(version)) {
    jobUrl = sparkWebUrl + "/jobs";
  }
  String jobGroupId = jobProperties.getProperty("spark.jobGroup.id");

  Map<String, String> infos = new java.util.HashMap<String, String>();
  infos.put("jobUrl", jobUrl);
  infos.put("label", "SPARK JOB");
  infos.put("tooltip", "View in Spark web UI");
  infos.put("noteId", getNoteId(jobGroupId));
  infos.put("paraId", getParagraphId(jobGroupId));
  LOGGER.debug("Send spark job url: " + infos);
  context.getIntpEventClient().onParaInfosReceived(infos);
}
 
Example 16
Source Project: flink   Source File: HadoopRecoverableFsDataOutputStream.java    License: Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
	}

	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	// Only reachable because the compiler cannot see that rethrowIOException always throws.
	return false;
}
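
The truncateHandle used above is a java.lang.reflect.Method resolved elsewhere in the class. A sketch of how such a handle can be obtained; FileSystem#truncate(Path, long) exists as of Hadoop 2.7, but the holder class and error handling here are our assumption, not Flink's exact code:

import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TruncateHandleHolder {
	// Null when the Hadoop on the classpath is older than 2.7 and has no truncate().
	static final Method TRUNCATE_HANDLE = resolveTruncate();

	private static Method resolveTruncate() {
		try {
			// FileSystem#truncate(Path, long) was introduced in Hadoop 2.7.
			return FileSystem.class.getMethod("truncate", Path.class, long.class);
		} catch (NoSuchMethodException e) {
			return null;
		}
	}
}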
 
Example 17
Source Project: flink   Source File: HadoopRecoverableWriter.java    License: Apache License 2.0
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems
	if (!"hdfs".equalsIgnoreCase(fs.getScheme())) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS");
	}

	// Part of the functionality depends on specific Hadoop versions. We check
	// these schemes and versions eagerly for better error messages.
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		LOG.warn("WARNING: You are running on hadoop version " + VersionInfo.getVersion() + "." +
				" If your RollingPolicy does not roll on every checkpoint/savepoint, the StreamingFileSink will throw an exception upon recovery.");
	}
}
 
Example 18
/**
 * Set the number of locations in the split to SPLIT_MAX_NUM_LOCATIONS if it is larger than
 * SPLIT_MAX_NUM_LOCATIONS (MAPREDUCE-5186).
 */
private static List<InputSplit> cleanSplits(List<InputSplit> splits) throws IOException {
  // NOTE: String.compareTo is lexicographic here, so versions such as "2.10.0"
  // sort before "2.3.0"; see the numeric check sketched after this example.
  if (VersionInfo.getVersion().compareTo("2.3.0") >= 0) {
    // This issue was fixed in 2.3.0; on newer versions there is no need to clean up splits
    return splits;
  }

  List<InputSplit> cleanedSplits = Lists.newArrayList();

  for (int i = 0; i < splits.size(); i++) {
    CombineFileSplit oldSplit = (CombineFileSplit) splits.get(i);
    String[] locations = oldSplit.getLocations();

    Preconditions.checkNotNull(locations, "CombineFileSplit.getLocations() returned null");

    if (locations.length > SPLIT_MAX_NUM_LOCATIONS) {
      locations = Arrays.copyOf(locations, SPLIT_MAX_NUM_LOCATIONS);
    }

    cleanedSplits.add(new CombineFileSplit(oldSplit.getPaths(), oldSplit.getStartOffsets(), oldSplit.getLengths(),
        locations));
  }
  return cleanedSplits;
}
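
One caveat worth flagging in this example: because String.compareTo is lexicographic, the version guard treats "2.10.0" as older than "2.3.0" (harmless here, it merely cleans splits unnecessarily, but easy to get bitten by elsewhere). A numeric, component-wise check of our own devising avoids the pitfall:

import org.apache.hadoop.util.VersionInfo;

final class HadoopVersionCheck {
  // True if the Hadoop version on the classpath is >= major.minor.patch.
  static boolean isAtLeast(int major, int minor, int patch) {
    // Split on dots and dashes to tolerate vendor suffixes such as "2.7.3-amzn-1".
    String[] parts = VersionInfo.getVersion().split("[.-]");
    int[] actual = new int[3];
    for (int i = 0; i < 3; i++) {
      actual[i] = (i < parts.length && parts[i].matches("\\d+"))
          ? Integer.parseInt(parts[i]) : 0;
    }
    int[] wanted = { major, minor, patch };
    for (int i = 0; i < 3; i++) {
      if (actual[i] != wanted[i]) {
        return actual[i] > wanted[i];
      }
    }
    return true; // versions are equal
  }
}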
 
Example 19
Source Project: hadoop-gpu   Source File: ListPathsServlet.java    License: Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 20
Source Project: RDFS   Source File: ListPathsServlet.java    License: Apache License 2.0
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 21
Source Project: RDFS   Source File: FSDataset.java    License: Apache License 2.0
/**
 * Register the FSDataset MBean using the name
 *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
 */
void registerMBean(final String storageId) {
  // We wrap to bypass the standard MBean naming convention.
  // This wrapping can be removed in Java 6, which is more flexible in
  // package naming for MBeans and their implementations.
  StandardMBean bean;
  String storageName;
  if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage
    storageName = "UndefinedStorageId" + rand.nextInt();
  } else {
    storageName = storageId;
  }
  try {
    bean = new StandardMBean(this,FSDatasetMBean.class);
    mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
    versionBeanName = VersionInfo.registerJMX("DataNode");
  } catch (NotCompliantMBeanException e) {
    e.printStackTrace();
  }

  DataNode.LOG.info("Registered FSDatasetStatusMBean");
}
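
Per the javadoc, the bean lands under "hadoop:service=DataNode,name=FSDatasetState-<storageid>". A hypothetical in-process reader for it (the storage id is a placeholder; "Remaining" is one of the attributes FSDatasetMBean exposes):

import java.lang.management.ManagementFactory;

import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

final class FsDatasetJmxProbe {
  // Reads the remaining-capacity attribute of the bean registered above.
  static Object remainingBytes(String storageId) throws JMException {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName(
        "hadoop:service=DataNode,name=FSDatasetState-" + storageId);
    return server.getAttribute(name, "Remaining");
  }
}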
 
Example 22
/**
 * Add a file path to the current set of classpath entries. It adds the file to the cache as well.
 * <p/>
 * This is copied from Hadoop 0.20.2 o.a.h.filecache.DistributedCache so we can inject the correct path separator for
 * the environment the cluster is executing in. See {@link #getClusterPathSeparator()}.
 *
 * @param file Path of the file to be added
 * @param conf Configuration that contains the classpath setting
 */
public void addFileToClassPath( Path file, Configuration conf )
  throws IOException {

  // Save off the classloader and swap in the one that loaded VersionInfo, to make sure
  // the version info can be loaded successfully from the hadoop-common JAR
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  Thread.currentThread().setContextClassLoader( VersionInfo.class.getClassLoader() );

  try {
    String classpath = conf.get( "mapred.job.classpath.files" );
    conf.set( "mapred.job.classpath.files", classpath == null ? file.toString()
      : classpath + getClusterPathSeparator() + file.toString() );
    FileSystem fs = FileSystem.get( conf );
    URI uri = fs.makeQualified( file ).toUri();

    org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheFile( uri, conf );
  } finally {
    // Restore the original classloader
    Thread.currentThread().setContextClassLoader( cl );
  }
}
 
Example 23
Source Project: Flink-CEPplus   Source File: RollingSinkSecuredITCase.java    License: Apache License 2.0
/**
 * Skips all tests if the Hadoop version doesn't match.
 * We can't run this test class until HDFS-9213 is fixed, which would allow a secure
 * DataNode to bind to non-privileged ports for testing.
 * For now, we skip this test class on any Hadoop version below 3.x.x.
 */
private static void skipIfHadoopVersionIsNotAppropriate() {
	// Skips all tests if the Hadoop version doesn't match
	String hadoopVersionString = VersionInfo.getVersion();
	String[] split = hadoopVersionString.split("\\.");
	if (split.length != 3) {
		throw new IllegalStateException("Hadoop version was not of format 'X.X.X': " + hadoopVersionString);
	}
	Assume.assumeTrue(
		// check whether we're running Hadoop version >= 3.x.x
		Integer.parseInt(split[0]) >= 3
	);
}
 
Example 24
@Test
public void testGetMethodReflectiveHadoop22() {
	assumeTrue(
		"Method getContainersFromPreviousAttempts is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 2));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	final Method method = registerApplicationMasterResponseReflector.getMethod();
	assertThat(method, notNullValue());
}
 
Example 25
Source Project: Flink-CEPplus   Source File: HadoopUtils.java    License: Apache License 2.0
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
	String versionString = VersionInfo.getVersion();
	String[] versionParts = versionString.split("\\.");

	if (versionParts.length < 2) {
		throw new FlinkRuntimeException(
				"Cannot determine version of Hadoop, unexpected version string: " + versionString);
	}

	int maj = Integer.parseInt(versionParts[0]);
	int min = Integer.parseInt(versionParts[1]);

	return maj > major || (maj == major && min >= minor);
}
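
A typical call site for this helper, in the spirit of the guard in Example 16 (a sketch; the exception type and message are ours):

if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
	// Feature gate: FileSystem#truncate only exists on Hadoop 2.7+.
	throw new UnsupportedOperationException(
			"This feature requires Hadoop 2.7+, found " + VersionInfo.getVersion());
}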
 
Example 26
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
	final String versionString = VersionInfo.getVersion();
	// NOTE: assumes single-digit version components; "2.10.0" would parse as 2.1f.
	final String prefix = versionString.substring(0, 3);
	final float version = Float.parseFloat(prefix);
	Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

	super.testMkdirsFailsForExistingFile();
}
 
Example 27
@Test
public void testGetMethodReflectiveHadoop22() {
	assumeTrue(
		"Method getContainersFromPreviousAttempts is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 2));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	final Method method = registerApplicationMasterResponseReflector.getMethod();
	assertThat(method, notNullValue());
}
 
Example 28
Source Project: flink   Source File: HadoopUtils.java    License: Apache License 2.0
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
	String versionString = VersionInfo.getVersion();
	String[] versionParts = versionString.split("\\.");

	if (versionParts.length < 2) {
		throw new FlinkRuntimeException(
				"Cannot determine version of Hadoop, unexpected version string: " + versionString);
	}

	int maj = Integer.parseInt(versionParts[0]);
	int min = Integer.parseInt(versionParts[1]);

	return maj > major || (maj == major && min >= minor);
}
 
Example 29
Source Project: flink   Source File: HadoopLocalFileSystemBehaviorTest.java    License: Apache License 2.0
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
	final String versionString = VersionInfo.getVersion();
	// NOTE: assumes single-digit version components; "2.10.0" would parse as 2.1f.
	final String prefix = versionString.substring(0, 3);
	final float version = Float.parseFloat(prefix);
	Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

	super.testMkdirsFailsForExistingFile();
}
 
Example 30
Source Project: hadoop   Source File: NodeInfo.java    License: Apache License 2.0
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.totalGCoresAllocatedContainers = resourceView
        .getGCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }