Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#byteArray2PathString()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#byteArray2PathString(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: FSImageFormat.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * load fsimage files assuming full path names are stored
 * 
 * @param numFiles total number of files to load
 * @param in data input stream
 * @param counter Counter to increment for namenode startup progress
 * @throws IOException if any error occurs
 */
private void loadFullNameINodes(long numFiles, DataInput in, Counter counter)
    throws IOException {
  byte[][] pathComponents;
  // Cached parent path of the previously loaded inode. Initialized to a
  // single empty component, paired with rootDir below, so the first non-root
  // entry forces a real parent lookup.
  byte[][] parentPath = {{}};      
  FSDirectory fsDir = namesystem.dir;
  INodeDirectory parentINode = fsDir.rootDir;
  for (long i = 0; i < numFiles; i++) {
    pathComponents = FSImageSerialization.readPathComponents(in);
    // Rewrite any component whose name became reserved in the target layout
    // version (upgrade-time rename), logging the old -> new full path.
    for (int j=0; j < pathComponents.length; j++) {
      byte[] newComponent = renameReservedComponentOnUpgrade
          (pathComponents[j], getLayoutVersion());
      if (!Arrays.equals(newComponent, pathComponents[j])) {
        String oldPath = DFSUtil.byteArray2PathString(pathComponents);
        pathComponents[j] = newComponent;
        String newPath = DFSUtil.byteArray2PathString(pathComponents);
        LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
      }
    }
    // The last path component is the inode's local name.
    final INode newNode = loadINode(
        pathComponents[pathComponents.length-1], false, in, counter);

    if (isRoot(pathComponents)) { // it is the root
      // update the root's attributes
      updateRootAttr(newNode.asDirectory());
      continue;
    }

    namesystem.dir.addToInodeMap(newNode);
    // check if the new inode belongs to the same parent
    if(!isParent(pathComponents, parentPath)) {
      // Parent changed: re-resolve the parent directory once and cache its
      // path so consecutive siblings reuse it without another lookup.
      parentINode = getParentINodeDirectory(pathComponents);
      parentPath = getParent(pathComponents);
    }

    // add new inode
    addToParent(parentINode, newNode);
  }
}
 
Example 2
Source File: FSImageFormat.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * load fsimage files assuming full path names are stored
 * 
 * @param numFiles total number of files to load
 * @param in data input stream
 * @param counter Counter to increment for namenode startup progress
 * @throws IOException if any error occurs
 */
private void loadFullNameINodes(long numFiles, DataInput in, Counter counter)
    throws IOException {
  byte[][] pathComponents;
  // Cached parent path of the previously loaded inode. Initialized to a
  // single empty component, paired with rootDir below, so the first non-root
  // entry forces a real parent lookup.
  byte[][] parentPath = {{}};      
  FSDirectory fsDir = namesystem.dir;
  INodeDirectory parentINode = fsDir.rootDir;
  for (long i = 0; i < numFiles; i++) {
    pathComponents = FSImageSerialization.readPathComponents(in);
    // Rewrite any component whose name became reserved in the target layout
    // version (upgrade-time rename), logging the old -> new full path.
    for (int j=0; j < pathComponents.length; j++) {
      byte[] newComponent = renameReservedComponentOnUpgrade
          (pathComponents[j], getLayoutVersion());
      if (!Arrays.equals(newComponent, pathComponents[j])) {
        String oldPath = DFSUtil.byteArray2PathString(pathComponents);
        pathComponents[j] = newComponent;
        String newPath = DFSUtil.byteArray2PathString(pathComponents);
        LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
      }
    }
    // The last path component is the inode's local name.
    final INode newNode = loadINode(
        pathComponents[pathComponents.length-1], false, in, counter);

    if (isRoot(pathComponents)) { // it is the root
      // update the root's attributes
      updateRootAttr(newNode.asDirectory());
      continue;
    }

    namesystem.dir.addToInodeMap(newNode);
    // check if the new inode belongs to the same parent
    if(!isParent(pathComponents, parentPath)) {
      // Parent changed: re-resolve the parent directory once and cache its
      // path so consecutive siblings reuse it without another lookup.
      parentINode = getParentINodeDirectory(pathComponents);
      parentPath = getParent(pathComponents);
    }

    // add new inode
    addToParent(parentINode, newNode);
  }
}
 
Example 3
Source File: INodesInPath.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Joins the stored byte-array components into a single path string.
 *
 * @return the full path in string form
 */
public String getPath() {
  final String fullPath = DFSUtil.byteArray2PathString(path);
  return fullPath;
}
 
Example 4
Source File: INodesInPath.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the path string for a leading prefix of the components.
 *
 * @param pos length bound passed to DFSUtil together with offset 0
 * @return the path string formed from the leading components
 */
public String getPath(int pos) {
  final int offset = 0;
  return DFSUtil.byteArray2PathString(path, offset, pos);
}
 
Example 5
Source File: INodesInPath.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * @return the full path in string form, produced from the {@code path}
 *         components
 */
public String getPath() {
  final byte[][] components = path;
  return DFSUtil.byteArray2PathString(components);
}
 
Example 6
Source File: INodesInPath.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * @param pos upper bound handed to DFSUtil along with a starting offset of 0
 * @return the path string for the leading components
 */
public String getPath(int pos) {
  return DFSUtil.byteArray2PathString(
      path, 0, pos);
}
 
Example 7
Source File: RangerHdfsAuthorizer.java    From ranger with Apache License 2.0 4 votes vote down vote up
/**
 * Performs the traversal check for {@code path}. When the requested inode is
 * absent or is a file, the check falls back to its parent directory, or, if
 * no parent is given, to the ancestor at {@code ancestorIndex}. Returns
 * ALLOW when there is nothing left to check.
 */
private AuthzStatus traverseOnlyCheck(INode inode, INodeAttributes[] inodeAttrs, String path, byte[][] components, INode parent, INode ancestor, int ancestorIndex,
									  String user, Set<String> groups, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler) {

	if (LOG.isDebugEnabled()) {
		LOG.debug("==> RangerAccessControlEnforcer.traverseOnlyCheck("
				+ "path=" + path + ", user=" + user + ", groups=" + groups + ")");
	}
	final AuthzStatus ret;

	// Default: check the requested inode itself, using the deepest
	// resolved attributes (if any were resolved).
	INode nodeToCheck = inode;
	INodeAttributes nodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null;
	boolean skipAuditOnAllow = false;

	String resourcePath = path;
	if (nodeToCheck == null || nodeToCheck.isFile()) {
		// The target is missing or a file: traversal is evaluated against the
		// parent (preferred) or the known ancestor instead, and an ALLOW on
		// this fallback node is not audited.
		skipAuditOnAllow = true;
		if (parent != null) {
			nodeToCheck = parent;
			nodeAttribs = inodeAttrs.length > 1 ? inodeAttrs[inodeAttrs.length - 2] : null;
			resourcePath = inodeAttrs.length > 0 ? DFSUtil.byteArray2PathString(components, 0, inodeAttrs.length - 1) : HDFS_ROOT_FOLDER_PATH;
		} else if (ancestor != null) {
			nodeToCheck = ancestor;
			nodeAttribs = inodeAttrs.length > ancestorIndex ? inodeAttrs[ancestorIndex] : null;
			resourcePath = nodeAttribs != null ? DFSUtil.byteArray2PathString(components, 0, ancestorIndex+1) : HDFS_ROOT_FOLDER_PATH;
		}
	}

	if (nodeToCheck != null) {
		// Strip a trailing root-folder separator (presumably "/") unless the
		// resource path IS the root itself — NOTE(review): likely to match the
		// plugin's policy-resource format; confirm against RangerHdfsPlugin.
		if (resourcePath.length() > 1) {
			if (resourcePath.endsWith(HDFS_ROOT_FOLDER_PATH)) {
				resourcePath = resourcePath.substring(0, resourcePath.length()-1);
			}
		}
		ret = isAccessAllowedForTraversal(nodeToCheck, nodeAttribs, resourcePath, user, groups, plugin, auditHandler, skipAuditOnAllow);
	} else {
		// No inode, parent, or ancestor to evaluate: nothing blocks traversal.
		ret = AuthzStatus.ALLOW;
	}
	if (LOG.isDebugEnabled()) {
		LOG.debug("<== RangerAccessControlEnforcer.traverseOnlyCheck("
				+ "path=" + path + ", resourcePath=" + resourcePath + ", user=" + user + ", groups=" + groups + ") : " + ret);
	}
	return ret;
}