Java Code Examples for org.apache.hadoop.hdfs.DFSInputStream#close()

The following examples show how to use org.apache.hadoop.hdfs.DFSInputStream#close(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: WaitingRoom.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/**
 * Recursively walks the namespace rooted at {@code dir}, recording the block
 * ids of every regular file into {@code fileMap} and marking each block id
 * as unreferenced (null) in {@code blockRefMap}.
 *
 * @param dir    directory to scan
 * @param client DFS client used to open files and fetch their block lists
 * @throws IOException if listing a directory or opening a file fails
 */
private void addDirToMaps(Path dir, DFSClient client) throws IOException {
  FileStatus[] children = dfs.listStatus(dir);

  if (children == null) return;

  for (FileStatus child: children) {
    if (!child.isDir()) { // get block ids for file
      Path path = child.getPath(); // paths will be unique
      fileMap.put(path, new ArrayList<Long>());

      DFSInputStream stm = client.open(child.getPath().toUri().getPath());
      LocatedBlocks blocks;
      try {
        blocks = stm.fetchLocatedBlocks();
      } finally {
        // Close in a finally so the stream is not leaked when
        // fetchLocatedBlocks() throws.
        stm.close();
      }

      for (int i = 0; i < blocks.locatedBlockCount(); i++) {
        Long blockId = blocks.get(i).getBlock().getBlockId();
        fileMap.get(path).add(blockId); // add to file block list
        blockRefMap.put(blockId, null); // mark as unreferenced
      }
    }
    else {
      // If child is a directory, recurse on it
      addDirToMaps(child.getPath(), client);
    }
  }
}
 
Example 2
Source File: TestFsck.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Reads the entire file {@code name} into memory so its bytes can be
 * compared against the salvaged copy later.
 *
 * @return the file's full contents
 * @throws IOException if the file cannot be stat'ed, opened, or read
 */
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    // Null-safe close: a bare in.close() would throw NPE (masking the
    // original exception) if dfsClient.open() itself failed. This matches
    // the cleanup style used by checkSalvagedRemains() in this file.
    IOUtils.cleanup(null, in);
  }
  return content;
}
 
Example 3
Source File: TestFsck.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that the blocks salvaged into /lost+found match the cached
 * original file contents, skipping the blocks that were deliberately
 * corrupted. Fsck splits the salvaged file into chain files
 * ("/lost+found/&lt;name&gt;/0", "/1", ...) at each corrupted block, so a new
 * stream is opened whenever a corrupted block ends the previous chain.
 *
 * @throws IOException if a salvaged byte differs from the original, or if
 *         reading a chain file fails
 */
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  // Round up so a partial trailing block still counts as a block.
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        // Corrupted blocks are absent from the salvage; close the current
        // chain so the next good block opens the next chain file.
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    // Null-safe close of whatever chain stream is still open.
    IOUtils.cleanup(null, in);
  }
}
 
Example 4
Source File: TestFsck.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Reads the entire file {@code name} into memory so its bytes can be
 * compared against the salvaged copy later.
 *
 * @return the file's full contents
 * @throws IOException if the file cannot be stat'ed, opened, or read
 */
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    // Null-safe close: a bare in.close() would throw NPE (masking the
    // original exception) if dfsClient.open() itself failed. This matches
    // the cleanup style used by checkSalvagedRemains() in this file.
    IOUtils.cleanup(null, in);
  }
  return content;
}
 
Example 5
Source File: TestFsck.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that the blocks salvaged into /lost+found match the cached
 * original file contents, skipping the blocks that were deliberately
 * corrupted. Fsck splits the salvaged file into chain files
 * ("/lost+found/&lt;name&gt;/0", "/1", ...) at each corrupted block, so a new
 * stream is opened whenever a corrupted block ends the previous chain.
 *
 * @throws IOException if a salvaged byte differs from the original, or if
 *         reading a chain file fails
 */
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  // Round up so a partial trailing block still counts as a block.
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        // Corrupted blocks are absent from the salvage; close the current
        // chain so the next good block opens the next chain file.
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    // Null-safe close of whatever chain stream is still open.
    IOUtils.cleanup(null, in);
  }
}
 
Example 6
Source File: TestDFSClientUpdateNameNodeSignature.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Tests that the client re-fetches its name-node proxy when the name-node's
 * client-protocol methods fingerprint changes, and keeps the existing proxy
 * when the fingerprint is unchanged.
 *
 * @throws IOException if any file create/read operation fails
 */
public void testClientUpdateMethodList() throws IOException {
  InetSocketAddress addr = cluster.getNameNode().getNameNodeDNAddress();
  DFSClient client = new DFSClient(addr, cluster.getNameNode().getConf());
  ClientProtocol oldNamenode = client.namenode;
  
  // Client's name-node proxy should keep the same if the same namenode
  // sends the same fingerprint
  //
  OutputStream os = client.create("/testClientUpdateMethodList.txt", true);
  os.write(66);
  os.close();
  TestCase.assertSame(oldNamenode, client.namenode);    
  int oldFingerprint = cluster.getNameNode().getClientProtocolMethodsFingerprint();
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());
  
  // Namenode's fingerprint will be different to client. Client is supposed
  // to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(666);
  os = client.create("/testClientUpdateMethodList1.txt", true);
  os.write(88);
  os.close();
  TestCase.assertNotSame(oldNamenode, client.namenode);
  // Since we didn't change method list of name-node, the fingerprint
  // got from the new proxy should be the same as the previous one.
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());
  
  // Client's name-node proxy should keep the same if the same namenode
  // sends the same fingerprint
  //
  ClientProtocol namenode1 = client.namenode;
  cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
  DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
  int val = dis.read();
  TestCase.assertEquals(66, val);
  dis.close();
  TestCase.assertSame(namenode1, client.namenode);

  // Namenode's fingerprint will be different to client. Client is supposed
  // to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
  dis = client.open("/testClientUpdateMethodList1.txt");
  val = dis.read();
  TestCase.assertEquals(88, val);
  dis.close();
  // The fingerprint changed (888), so the client should have replaced its
  // proxy with a new one.
  TestCase.assertNotSame(namenode1, client.namenode);
}