Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#string2Bytes()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#string2Bytes(). You can vote up the examples you find useful or vote down those you don't, and you can navigate to the original project or source file via the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: TestINodeFile.java    From big-c with Apache License 2.0 6 votes vote down vote up
/** Verifies that getFullPathName reflects an inode's position in the tree. */
@Test
public void testGetFullPathName() {
  replication = 3;
  preferredBlockSize = 128*1024*1024;

  // A file named "f" that is not yet attached to any directory.
  INodeFile file = createINodeFile(replication, preferredBlockSize);
  file.setLocalName(DFSUtil.string2Bytes("f"));

  INodeDirectory rootDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      INodeDirectory.ROOT_NAME, perm, 0L);
  INodeDirectory subDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      DFSUtil.string2Bytes("d"), perm, 0L);

  // Detached file: full path is just its local name.
  assertEquals("f", file.getFullPathName());

  // After attaching to "d", the path is relative to that directory.
  subDir.addChild(file);
  assertEquals("d" + Path.SEPARATOR + "f", file.getFullPathName());

  // Once "d" hangs off the root, the paths become absolute.
  rootDir.addChild(subDir);
  assertEquals(Path.SEPARATOR + "d" + Path.SEPARATOR + "f",
      file.getFullPathName());
  assertEquals(Path.SEPARATOR + "d", subDir.getFullPathName());

  assertEquals(Path.SEPARATOR, rootDir.getFullPathName());
}
 
Example 2
Source File: FSImageTestUtil.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();

  // All synthetic directories share one fake owner/group/permission.
  PermissionStatus permStatus = PermissionStatus.createImmutable(
      "fakeuser", "fakegroup", FsPermission.createImmutable((short)0755));
  int n = 1;
  while (n <= numDirs) {
    String name = "dir" + n;
    // Inode ids are assigned sequentially starting at newInodeId.
    INodeDirectory dir = new INodeDirectory(newInodeId + n - 1,
        DFSUtil.string2Bytes(name), permStatus, 0L);
    editLog.logMkDir("/" + name, dir);
    n++;
  }
  editLog.logSync();
  // Abort rather than close, leaving the segment in-progress on disk.
  editLog.abortCurrentLogSegment();
}
 
Example 3
Source File: FSImageFormat.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // Layouts that already support snapshots need no renaming.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Upgrading from a pre-snapshot layout: ".snapshot" is now reserved and
  // must be renamed using the user-supplied mapping.
  if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    Preconditions.checkArgument(
        renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
        RESERVED_ERROR_MSG);
    final String replacement =
        renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR);
    return DFSUtil.string2Bytes(replacement);
  }
  return component;
}
 
Example 4
Source File: FSImageFormat.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // Nothing to do when the layout version already supports snapshots.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Pre-snapshot upgrade path: ".snapshot" becomes a reserved name and
  // must have an entry in the rename map.
  if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    Preconditions.checkArgument(
        renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
        RESERVED_ERROR_MSG);
    return DFSUtil.string2Bytes(
        renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR));
  }
  return component;
}
 
Example 5
Source File: TestJsonUtil.java    From big-c with Apache License 2.0 6 votes vote down vote up
/** Round-trips an HdfsFileStatus through its JSON representation. */
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  // Symlink status: local name "bar", link target "foo".
  final HdfsFileStatus original = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus expected = toFileStatus(original, parent);
  System.out.println("status  = " + original);
  System.out.println("fstatus = " + expected);

  // Serialize, then parse back via a generic Map.
  final String json = JsonUtil.toJsonString(original, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus parsed =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus actual = toFileStatus(parsed, parent);
  System.out.println("s2      = " + parsed);
  System.out.println("fs2     = " + actual);
  Assert.assertEquals(expected, actual);
}
 
Example 6
Source File: FSImageTestUtil.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();

  // One fake owner/group/permission shared by every generated directory.
  PermissionStatus perms = PermissionStatus.createImmutable(
      "fakeuser", "fakegroup", FsPermission.createImmutable((short)0755));
  for (int i = 0; i < numDirs; i++) {
    // Directories are named dir1..dirN with sequential inode ids.
    String dirName = "dir" + (i + 1);
    INodeDirectory dir = new INodeDirectory(newInodeId + i,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  // Abort instead of closing so the segment remains in-progress.
  editLog.abortCurrentLogSegment();
}
 
Example 7
Source File: TestINodeFile.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/** Checks getFullPathName before and after wiring inodes into a tree. */
@Test
public void testGetFullPathName() {
  replication = 3;
  preferredBlockSize = 128*1024*1024;

  INodeFile fileNode = createINodeFile(replication, preferredBlockSize);
  fileNode.setLocalName(DFSUtil.string2Bytes("f"));

  INodeDirectory rootNode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      INodeDirectory.ROOT_NAME, perm, 0L);
  INodeDirectory dirNode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      DFSUtil.string2Bytes("d"), perm, 0L);

  // Unattached file reports only its own name.
  assertEquals("f", fileNode.getFullPathName());

  // Attached under "d": path is d/f (relative, no leading separator yet).
  dirNode.addChild(fileNode);
  assertEquals("d" + Path.SEPARATOR + "f", fileNode.getFullPathName());

  // With "d" under the root, both paths become absolute.
  rootNode.addChild(dirNode);
  assertEquals(Path.SEPARATOR + "d" + Path.SEPARATOR + "f",
      fileNode.getFullPathName());
  assertEquals(Path.SEPARATOR + "d", dirNode.getFullPathName());

  assertEquals(Path.SEPARATOR, rootNode.getFullPathName());
}
 
Example 8
Source File: INode.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Convert strings to byte arrays for path components. */
static byte[][] getPathComponents(String[] strings) {
  // An empty input maps to a single null component.
  if (strings.length == 0) {
    return new byte[][]{null};
  }
  final int count = strings.length;
  byte[][] components = new byte[count][];
  for (int idx = 0; idx < count; idx++) {
    components[idx] = DFSUtil.string2Bytes(strings[idx]);
  }
  return components;
}
 
Example 9
Source File: JsonUtil.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  // When the payload is wrapped, unwrap the "FileStatus" entry first.
  final Map<?, ?> props = includesType
      ? (Map<?, ?>) json.get(FileStatus.class.getSimpleName())
      : json;
  final String localName = (String) props.get("pathSuffix");
  final PathType type = PathType.valueOf((String) props.get("type"));
  // Only symlinks carry a link target.
  final byte[] symlink = type == PathType.SYMLINK
      ? DFSUtil.string2Bytes((String) props.get("symlink"))
      : null;

  final long len = ((Number) props.get("length")).longValue();
  final String owner = (String) props.get("owner");
  final String group = (String) props.get("group");
  final FsPermission permission = toFsPermission((String) props.get("permission"),
      (Boolean) props.get("aclBit"), (Boolean) props.get("encBit"));
  final long aTime = ((Number) props.get("accessTime")).longValue();
  final long mTime = ((Number) props.get("modificationTime")).longValue();
  final long blockSize = ((Number) props.get("blockSize")).longValue();
  final short replication = ((Number) props.get("replication")).shortValue();
  // Older servers may omit fileId/childrenNum/storagePolicy; fall back to
  // the historical defaults.
  final long fileId = props.containsKey("fileId")
      ? ((Number) props.get("fileId")).longValue()
      : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(props, "childrenNum", -1);
  final byte storagePolicy = props.containsKey("storagePolicy")
      ? (byte) ((Number) props.get("storagePolicy")).longValue()
      : BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
 
Example 10
Source File: FSDirSnapshotOp.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Verify if the snapshot name is legal. */
static void verifySnapshotName(FSDirectory fsd, String snapshotName,
    String path)
    throws FSLimitException.PathComponentTooLongException {
  // A snapshot name is a single path component, so "/" is forbidden.
  if (snapshotName.contains(Path.SEPARATOR)) {
    throw new HadoopIllegalArgumentException(
        "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
  }
  // Byte-level checks (reserved names, max component length) are delegated
  // to the directory manager.
  final byte[] nameBytes = DFSUtil.string2Bytes(snapshotName);
  fsd.verifyINodeName(nameBytes);
  fsd.verifyMaxComponentLength(nameBytes, path);
}
 
Example 11
Source File: JsonUtil.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  // If the map is wrapped under the class simple name, drill into it.
  final Map<?, ?> attrs = includesType
      ? (Map<?, ?>) json.get(FileStatus.class.getSimpleName())
      : json;
  final String localName = (String) attrs.get("pathSuffix");
  final PathType type = PathType.valueOf((String) attrs.get("type"));
  // A link target is only present for symlinks.
  final byte[] symlink = type == PathType.SYMLINK
      ? DFSUtil.string2Bytes((String) attrs.get("symlink"))
      : null;

  final long len = ((Number) attrs.get("length")).longValue();
  final String owner = (String) attrs.get("owner");
  final String group = (String) attrs.get("group");
  final FsPermission permission = toFsPermission((String) attrs.get("permission"),
      (Boolean) attrs.get("aclBit"), (Boolean) attrs.get("encBit"));
  final long aTime = ((Number) attrs.get("accessTime")).longValue();
  final long mTime = ((Number) attrs.get("modificationTime")).longValue();
  final long blockSize = ((Number) attrs.get("blockSize")).longValue();
  final short replication = ((Number) attrs.get("replication")).shortValue();
  // Optional fields; defaults preserve compatibility with older payloads.
  final long fileId = attrs.containsKey("fileId")
      ? ((Number) attrs.get("fileId")).longValue()
      : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(attrs, "childrenNum", -1);
  final byte storagePolicy = attrs.containsKey("storagePolicy")
      ? (byte) ((Number) attrs.get("storagePolicy")).longValue()
      : BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
 
Example 12
Source File: TestNestedSnapshots.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Test {@link Snapshot#ID_COMPARATOR}.
 */
@Test (timeout=300000)
public void testIdCmp() {
  final PermissionStatus perm = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short)0));
  final INodeDirectory snapshottable = new INodeDirectory(0,
      DFSUtil.string2Bytes("foo"), perm, 0L);
  snapshottable.addSnapshottableFeature();
  // Duplicate ids/names on purpose: the comparator must treat them as equal.
  final Snapshot[] snapshots = {
    new Snapshot(1, "s1", snapshottable),
    new Snapshot(1, "s1", snapshottable),
    new Snapshot(2, "s2", snapshottable),
    new Snapshot(2, "s2", snapshottable),
  };

  // null sorts after any snapshot; two nulls compare equal.
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for (Snapshot a : snapshots) {
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, a) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(a, null) < 0);

    for (Snapshot b : snapshots) {
      // The comparator must agree in sign with root-name ordering.
      final int expected = a.getRoot().getLocalName().compareTo(
          b.getRoot().getLocalName());
      final int computed = Snapshot.ID_COMPARATOR.compare(a, b);
      Assert.assertEquals(expected > 0, computed > 0);
      Assert.assertEquals(expected == 0, computed == 0);
      Assert.assertEquals(expected < 0, computed < 0);
    }
  }
}
 
Example 13
Source File: INode.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Convert strings to byte arrays for path components. */
static byte[][] getPathComponents(String[] strings) {
  // Empty input yields a single null component.
  if (strings.length == 0) {
    return new byte[][]{null};
  }
  byte[][] result = new byte[strings.length][];
  int pos = 0;
  for (String s : strings) {
    result[pos++] = DFSUtil.string2Bytes(s);
  }
  return result;
}
 
Example 14
Source File: FSDirSnapshotOp.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Verify if the snapshot name is legal. */
static void verifySnapshotName(FSDirectory fsd, String snapshotName,
    String path)
    throws FSLimitException.PathComponentTooLongException {
  // Snapshot names are single path components; a separator is illegal.
  if (snapshotName.contains(Path.SEPARATOR)) {
    throw new HadoopIllegalArgumentException(
        "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
  }
  // Remaining validation (reserved names, length limit) works on bytes.
  final byte[] encoded = DFSUtil.string2Bytes(snapshotName);
  fsd.verifyINodeName(encoded);
  fsd.verifyMaxComponentLength(encoded, path);
}
 
Example 15
Source File: TestFsck.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {

  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;

  Configuration conf = new Configuration();
  // Mock the namenode/namesystem layers so no mini-cluster is required.
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String,String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);

  when(namenode.getNamesystem()).thenReturn(fsName);
  // Every block-location lookup reports the file as missing.
  when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
                                anyLong(), anyLong(),
                                anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);

  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, remoteAddress);

  String pathString = "/tmp/testFile";

  // Attributes of the synthetic file status handed to fsck.check().
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 *1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte [] symlink = null;
  // Fix: assign directly. The original code allocated "new byte[128]" and
  // then immediately overwrote the reference (dead store).
  byte [] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;

  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy);
  Result res = new Result(conf);

  // A missing file must not crash fsck; the report should stay HEALTHY.
  try {
    fsck.check(pathString, file, res);
  } catch (Exception e) {
    fail("Unexpected exception "+ e.getMessage());
  }
  assertTrue(res.toString().contains("HEALTHY"));
}
 
Example 16
Source File: TestDiff.java    From big-c with Apache License 2.0 4 votes vote down vote up
/** Build a directory inode named "n" followed by n zero-padded to width. */
static INode newINode(int n, int width) {
  final String paddedName = String.format("n%0" + width + "d", n);
  return new INodeDirectory(n, DFSUtil.string2Bytes(paddedName), PERM, 0L);
}
 
Example 17
Source File: INodeSymlink.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Construct a symlink inode.
 *
 * @param id inode id
 * @param name local name of the symlink inode, already in byte form
 * @param permissions owner/group/mode for the inode
 * @param mtime modification time
 * @param atime access time
 * @param symlink the link target path, converted to bytes for storage
 */
INodeSymlink(long id, byte[] name, PermissionStatus permissions,
    long mtime, long atime, String symlink) {
  super(id, name, permissions, mtime, atime);
  // Target is kept in the same byte representation as inode names
  // (encoding is whatever DFSUtil.string2Bytes uses — presumably UTF-8).
  this.symlink = DFSUtil.string2Bytes(symlink);
}
 
Example 18
Source File: TestFsck.java    From big-c with Apache License 2.0 4 votes vote down vote up
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {

  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;

  Configuration conf = new Configuration();
  // Mock the namenode/namesystem layers so no mini-cluster is required.
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String,String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);

  when(namenode.getNamesystem()).thenReturn(fsName);
  // Every block-location lookup reports the file as missing.
  when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
                                anyLong(), anyLong(),
                                anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);

  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, remoteAddress);

  String pathString = "/tmp/testFile";

  // Attributes of the synthetic file status handed to fsck.check().
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 *1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte [] symlink = null;
  // Fix: assign directly. The original code allocated "new byte[128]" and
  // then immediately overwrote the reference (dead store).
  byte [] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;

  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy);
  Result res = new Result(conf);

  // A missing file must not crash fsck; the report should stay HEALTHY.
  try {
    fsck.check(pathString, file, res);
  } catch (Exception e) {
    fail("Unexpected exception "+ e.getMessage());
  }
  assertTrue(res.toString().contains("HEALTHY"));
}
 
Example 19
Source File: INode.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Set local file name
 *
 * @param name new local (last-component) name for this inode
 */
void setLocalName(String name) {
  // Name is stored in byte form via DFSUtil.string2Bytes; the encoding is
  // whatever that helper uses — presumably UTF-8 (confirm in DFSUtil).
  this.name = DFSUtil.string2Bytes(name);
}
 
Example 20
Source File: INodeSymlink.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Construct a symlink inode.
 *
 * @param id inode id
 * @param name local name of the symlink inode, already in byte form
 * @param permissions owner/group/mode for the inode
 * @param mtime modification time
 * @param atime access time
 * @param symlink the link target path, converted to bytes for storage
 */
INodeSymlink(long id, byte[] name, PermissionStatus permissions,
    long mtime, long atime, String symlink) {
  super(id, name, permissions, mtime, atime);
  // Target is kept in the same byte representation as inode names
  // (encoding is whatever DFSUtil.string2Bytes uses — presumably UTF-8).
  this.symlink = DFSUtil.string2Bytes(symlink);
}