Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks#isUnderConstruction()

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks#isUnderConstruction(). You can go to the original project or source file by following the links above each example.
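Before the examples, here is a minimal sketch of the typical polling pattern: fetch the file's block list and wait until isUnderConstruction() turns false, i.e. until no writer holds the file's lease. The helper name waitUntilClosed, the retry count, and the sleep interval are illustrative assumptions, not code from any project below; DistributedFileSystem#getClient() and DFSClient#getLocatedBlocks(String, long, long) are the same calls the lease-recovery tests below rely on.

static void waitUntilClosed(DistributedFileSystem dfs, Path path)
    throws IOException, InterruptedException {
  for (int i = 0; i < 30; i++) {
    // Ask the namenode for the current block list of the file.
    LocatedBlocks blocks = dfs.getClient()
        .getLocatedBlocks(path.toUri().getPath(), 0L, Long.MAX_VALUE);
    if (blocks != null && !blocks.isUnderConstruction()) {
      return; // lease released; the file is closed
    }
    Thread.sleep(500L); // illustrative poll interval
  }
  throw new IOException("File still under construction: " + path);
}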
Example 1
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  boolean success = false;
  for(int i = 0; i < attempts; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p, dfs);
    boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if(!blocks.isUnderConstruction() &&
        (noLastBlock || blocks.isLastBlockComplete())) {
      success = true;
      break;
    }
    try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
  }
  assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
      success, is(true));
}
 
Example 2
Source File: DistributedRaidFileSystem.java    From RDFS with Apache License 2.0
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  // We want to use RAID logic only on instances of DFS.
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
    LocatedBlocks lbs =
        underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
    if (lbs != null) {
      // Use underlying filesystem if the file is under construction.
      if (!lbs.isUnderConstruction()) {
        // Use underlying filesystem if file length is 0.
        final long fileSize = getFileSize(lbs);
        if (fileSize > 0) {
          return new ExtFSDataInputStream(conf, this, f,
            fileSize, getBlockSize(lbs), bufferSize);
        }
      }
    }
  }
  return fs.open(f, bufferSize);
}
 
Example 3
Source File: DFSInputStream.java    From RDFS with Apache License 2.0
private void checkLocatedBlocks(LocatedBlocks locatedBlocks)
    throws IOException {
  if (null == locatedBlocks) {
    return;
  }
  if(!locatedBlocks.isUnderConstruction()) {
    return;
  }
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  if (lbs == null) {
    return;
  }
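  // All blocks except the last should have a known size by now; a
  // placeholder size (<= 1) indicates the namenode has not received it yet.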
  for (int i = 0; i < lbs.size() - 1; i++) {
    if (lbs.get(i).getBlockSize() <= 1) {
      throw new IOException(
          "File is under construction and namenode hasn't received the second last block yet.");
    }
  }
}
 
Example 4
Source File: NamenodeFsck.java    From RDFS with Apache License 2.0
private void checkForCorruptOpenFiles(
  FileStatus file, List<FileStatus> corruptFiles
) throws IOException {
  String filePath = file.getPath().toUri().getPath();
  
  if (file.isDir()) {
    for (FileStatus fileStatus : nn.namesystem.dir.getListing(filePath)) {
      checkForCorruptOpenFiles(fileStatus, corruptFiles);
    }
    
  } else {
    LeaseManager.Lease lease = 
      nn.getNamesystem().leaseManager.getLeaseByPath(filePath);
    // Condition: 
    //  1. lease has expired hard limit
    //  2. the file is open for write
    //  3. the last block has 0 locations
    if (lease != null && lease.expiredHardLimit()) {
      LocatedBlocks blocks =
        nn.getNamesystem().getBlockLocations(filePath, 0, file.getLen());
      List<LocatedBlock> locatedBlockList = blocks.getLocatedBlocks();
      LocatedBlock lastBlock =
        locatedBlockList.get(locatedBlockList.size() - 1);

      if (blocks.isUnderConstruction() && lastBlock.getLocations().length == 0) {
        corruptFiles.add(file);
      }
    }
  }
}
 
Example 5
Source File: TestLeaseRecovery2.java    From hadoop with Apache License 2.0
/**
 * This test makes the client stop renewing its lease and sets the hard
 * lease expiration period to a short 1 second, triggering lease
 * expiration while the client is still alive.
 *
 * The test makes sure that lease recovery completes and that the client
 * fails if it continues to write to the file.
 *
 * @throws Exception
 */
@Test
public void testHardLeaseRecovery() throws Exception {
  //create a file
  String filestr = "/hardLeaseRecovery";
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true,
      BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));

  // write bytes into the file.
  int size = AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);

  // hflush file
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  
  // kill the lease renewal thread
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();

  // set the hard limit to be 1 second 
  cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
  
  // wait for lease recovery to complete
  LocatedBlocks locatedBlocks;
  do {
    Thread.sleep(SHORT_LEASE_PERIOD);
    locatedBlocks = dfs.dfs.getLocatedBlocks(filestr, 0L, size);
  } while (locatedBlocks.isUnderConstruction());
  assertEquals(size, locatedBlocks.getFileLength());

  // make sure that the writer thread gets killed
  try {
    stm.write('b');
    stm.close();
    fail("Writer thread should have been killed");
  } catch (IOException e) {
    e.printStackTrace();
  }      

  // verify data
  AppendTestUtil.LOG.info(
      "File size is good. Now validating sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
 
Example 6
Source File: TestRaidDfs.java    From RDFS with Apache License 2.0
public static void waitForFileRaided(
  Log logger, FileSystem fileSys, Path file, Path destPath, short targetReplication)
throws IOException, InterruptedException {
  FileStatus parityStat = null;
  String fileName = file.getName().toString();
  // wait till file is raided
  while (parityStat == null) {
    logger.info("Waiting for files to be raided.");
    try {
      FileStatus[] listPaths = fileSys.listStatus(destPath);
      if (listPaths != null) {
        for (FileStatus f : listPaths) {
          logger.info("File raided so far : " + f.getPath());
          String found = f.getPath().getName().toString();
          if (fileName.equals(found)) {
            parityStat = f;
            break;
          }
        }
      }
    } catch (FileNotFoundException e) {
      //ignore
    }
    Thread.sleep(1000);                  // keep waiting
  }

  while (true) {
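    // Poll until the file's block list is finalized, i.e. no writer lease.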
    LocatedBlocks locations = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    locations = RaidDFSUtil.getBlockLocations(
      dfs, file.toUri().getPath(), 0, parityStat.getLen());
    if (!locations.isUnderConstruction()) {
      break;
    }
    Thread.sleep(1000);
  }

  while (true) {
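    // Poll until the file's replication reaches the target.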
    FileStatus stat = fileSys.getFileStatus(file);
    if (stat.getReplication() == targetReplication) break;
    Thread.sleep(1000);
  }
}
 
Example 7
Source File: DFSInputStream.java    From RDFS with Apache License 2.0
/**
 * Grab the open-file info from namenode
 */
synchronized void openInfo() throws IOException {
  if (src == null && blocks == null) {
    throw new IOException("No fine provided to open");
  }

  LocatedBlocks newInfo = src != null ? 
                          getLocatedBlocks(src, 0, prefetchSize) : blocks;
  if (newInfo == null) {
    throw new IOException("Cannot open filename " + src);
  }

  // I think this check is not correct. A file could have been appended to
  // between two calls to openInfo().
  if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() &&
      !newInfo.isUnderConstruction()) {
    Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
    Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
    while (oldIter.hasNext() && newIter.hasNext()) {
      if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
        throw new IOException("Blocklist for " + src + " has changed!");
      }
    }
  }

  // if the file is under construction, then fetch size of last block
  // from datanode.
  if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
    LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
    if (last.getLocations().length > 0) {
      try {
        Block newBlock = getBlockInfo(last);
        // only if the block has data (not null)
        if (newBlock != null) {
          long newBlockSize = newBlock.getNumBytes();
          newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
        }
      } catch (IOException e) {
        DFSClient.LOG.debug("DFSClient file " + src +
                  " is being concurrently appended to," +
                  " but datanodes probably do not have block " +
                  last.getBlock(), e);
      }
    }
  }
  this.locatedBlocks = new DFSLocatedBlocks(newInfo);
  this.currentNode = null;
}
 
Example 8
Source File: DFSLocatedBlocks.java    From RDFS with Apache License 2.0
public DFSLocatedBlocks(LocatedBlocks lbs) {
  super(lbs.getFileLength(), lbs.getLocatedBlocks(), lbs.isUnderConstruction());
  this.fileLength = lbs.getFileLength();
  lock = new ReentrantReadWriteLock(true); // fair
}
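 
For completeness, a short usage sketch (an assumption, not verbatim RDFS code, with dfs and f being the DistributedFileSystem and Path from Example 2): the wrapper is typically built from a freshly fetched block list, as openInfo() in Example 7 does, and it preserves the under-construction flag.

LocatedBlocks raw = dfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
if (raw != null) {
  DFSLocatedBlocks blocks = new DFSLocatedBlocks(raw);
  // Callers can still gate their logic on a writer holding the lease.
  boolean stillOpen = blocks.isUnderConstruction();
}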