org.apache.hadoop.hdfs.util.Holder Java Examples

The following examples show how to use org.apache.hadoop.hdfs.util.Holder. Each example is drawn from an open-source project; the source file and project it comes from are noted above the code.
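The class itself is a tiny generic wrapper whose value is exposed through a public mutable field, held. A minimal sketch of its shape, matching how the examples below construct it and read .held (check the Hadoop source for the authoritative definition):

package org.apache.hadoop.hdfs.util;

/**
 * A Holder wraps some other object, typically an immutable value such as
 * a boxed Integer, so the value can be updated in place inside a
 * collection without a second lookup-and-put.
 */
public class Holder<T> {
  public T held;

  public Holder(T held) {
    this.held = held;
  }

  @Override
  public String toString() {
    return String.valueOf(held);
  }
}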
Example #1
Source File: TestEditLogFileInputStream.java    From hadoop with Apache License 2.0
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
 
Example #2
Source File: FSImageTestUtil.java    From hadoop with Apache License 2.0
/**
 * @see #countEditLogOpTypes(File)
 */
public static EnumMap<FSEditLogOpCodes, Holder<Integer>> countEditLogOpTypes(
    EditLogInputStream elis) throws IOException {
  EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
      new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
  
  FSEditLogOp op;
  while ((op = elis.readOp()) != null) {
    Holder<Integer> i = opCounts.get(op.opCode);
    if (i == null) {
      i = new Holder<Integer>(0);
      opCounts.put(op.opCode, i);
    }
    i.held++;
  }
  return opCounts;
}
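The same Holder-in-an-EnumMap counting idiom works for any enum key. A self-contained sketch (the Color enum and data are made up for illustration; assumes the Hadoop Holder class is on the classpath):

import java.util.EnumMap;
import org.apache.hadoop.hdfs.util.Holder;

public class HolderCountDemo {
  enum Color { RED, GREEN, BLUE }

  public static void main(String[] args) {
    EnumMap<Color, Holder<Integer>> counts =
        new EnumMap<Color, Holder<Integer>>(Color.class);
    for (Color c : new Color[] { Color.RED, Color.BLUE, Color.RED }) {
      Holder<Integer> h = counts.get(c);
      if (h == null) {
        h = new Holder<Integer>(0);
        counts.put(c, h);
      }
      h.held++; // mutates the held value in place; no second put needed
    }
    System.out.println(counts); // prints {RED=2, BLUE=1}
  }
}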
 
Example #3
Source File: TestQJMWithFaults.java    From hadoop with Apache License 2.0
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  
  long firstTxId = txid;
  long lastAcked = txid - 1;
  try {
    EditLogOutputStream stm = qjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    
    for (int i = 0; i < numTxns; i++) {
      QJMTestUtil.writeTxns(stm, txid++, 1);
      lastAcked++;
    }
    
    stm.close();
    qjm.finalizeLogSegment(firstTxId, lastAcked);
  } catch (Throwable t) {
    thrown.held = t;
  }
  return lastAcked;
}
 
Example #4
Source File: TestQJMWithFaults.java    From big-c with Apache License 2.0
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  
  long firstTxId = txid;
  long lastAcked = txid - 1;
  try {
    EditLogOutputStream stm = qjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    
    for (int i = 0; i < numTxns; i++) {
      QJMTestUtil.writeTxns(stm, txid++, 1);
      lastAcked++;
    }
    
    stm.close();
    qjm.finalizeLogSegment(firstTxId, lastAcked);
  } catch (Throwable t) {
    thrown.held = t;
  }
  return lastAcked;
}
 
Example #5
Source File: FSImageTestUtil.java    From big-c with Apache License 2.0
/**
 * @see #countEditLogOpTypes(File)
 */
public static EnumMap<FSEditLogOpCodes, Holder<Integer>> countEditLogOpTypes(
    EditLogInputStream elis) throws IOException {
  EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
      new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
  
  FSEditLogOp op;
  while ((op = elis.readOp()) != null) {
    Holder<Integer> i = opCounts.get(op.opCode);
    if (i == null) {
      i = new Holder<Integer>(0);
      opCounts.put(op.opCode, i);
    }
    i.held++;
  }
  return opCounts;
}
 
Example #6
Source File: TestEditLogFileInputStream.java    From big-c with Apache License 2.0
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
 
Example #7
Source File: FSEditLog.java    From RDFS with Apache License 2.0
public static void dumpOpCounts(
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  StringBuilder sb = new StringBuilder();
  sb.append("Summary of operations loaded from edit log:\n  ");
  sb.append(opCounts);
  FSImage.LOG.debug(sb.toString());
}
 
Example #8
Source File: FSEditLogLoader.java    From hadoop with Apache License 2.0
private static void dumpOpCounts(
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  StringBuilder sb = new StringBuilder();
  sb.append("Summary of operations loaded from edit log:\n  ");
  Joiner.on("\n  ").withKeyValueSeparator("=").appendTo(sb, opCounts);
  FSImage.LOG.debug(sb.toString());
}
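Unlike Example #7, which leans on the toString output of EnumMap and Holder, this version formats each entry with Guava's Joiner. A minimal sketch of the resulting layout on a plain map (the sample counts are made up):

import com.google.common.base.Joiner;

import java.util.LinkedHashMap;
import java.util.Map;

public class JoinerFormatDemo {
  public static void main(String[] args) {
    Map<String, Integer> opCounts = new LinkedHashMap<String, Integer>();
    opCounts.put("OP_ADD", 1);
    opCounts.put("OP_CLOSE", 2);

    StringBuilder sb = new StringBuilder();
    sb.append("Summary of operations loaded from edit log:\n  ");
    // Same call as dumpOpCounts above: entries separated by a newline plus
    // a two-space indent, key and value joined by '='.
    Joiner.on("\n  ").withKeyValueSeparator("=").appendTo(sb, opCounts);
    System.out.println(sb);
    // Summary of operations loaded from edit log:
    //   OP_ADD=1
    //   OP_CLOSE=2
  }
}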
 
Example #9
Source File: FSEditLog.java    From RDFS with Apache License 2.0
private static void incrOpCount(FSEditLogOpCodes opCode,
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  Holder<Integer> holder = opCounts.get(opCode);
  if (holder == null) {
    holder = new Holder<Integer>(1);
    opCounts.put(opCode, holder);
  } else {
    holder.held++;
  }
}
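Holder exists so the count can be incremented in place; with a plain boxed Integer value, each increment must re-box and re-insert. For comparison only (this is not what the Hadoop code does), a Java 8 sketch using Map.merge with a hypothetical OpCode enum:

import java.util.EnumMap;

public class MergeCountDemo {
  // Hypothetical stand-in for FSEditLogOpCodes, for illustration only.
  enum OpCode { OP_ADD, OP_CLOSE }

  static void incrOpCount(OpCode opCode, EnumMap<OpCode, Integer> opCounts) {
    // Re-boxes and re-stores the value on every call, where the
    // Holder version mutates the existing entry in place.
    opCounts.merge(opCode, 1, Integer::sum);
  }

  public static void main(String[] args) {
    EnumMap<OpCode, Integer> counts =
        new EnumMap<OpCode, Integer>(OpCode.class);
    incrOpCount(OpCode.OP_ADD, counts);
    incrOpCount(OpCode.OP_ADD, counts);
    System.out.println(counts); // prints {OP_ADD=2}
  }
}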
 
Example #10
Source File: FSEditLogLoader.java    From RDFS with Apache License 2.0
private static void incrOpCount(FSEditLogOpCodes opCode,
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  Holder<Integer> holder = opCounts.get(opCode);
  if (holder == null) {
    holder = new Holder<Integer>(1);
    opCounts.put(opCode, holder);
  } else {
    holder.held++;
  }
}
 
Example #11
Source File: FSEditLogLoader.java    From RDFS with Apache License 2.0
private static void dumpOpCounts(
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  StringBuilder sb = new StringBuilder();
  sb.append("Summary of operations loaded from edit log:\n  ");
  Joiner.on("\n  ").withKeyValueSeparator("=").appendTo(sb, opCounts);
  FSImage.LOG.debug(sb.toString());
}
 
Example #12
Source File: FSImageTestUtil.java    From big-c with Apache License 2.0
/**
 * @param editLog a path of an edit log file
 * @return the count of each type of operation in the log file
 * @throws Exception if there is an error reading it
 */
public static EnumMap<FSEditLogOpCodes,Holder<Integer>> countEditLogOpTypes(
    File editLog) throws Exception {
  EditLogInputStream elis = new EditLogFileInputStream(editLog);
  try {
    return countEditLogOpTypes(elis);
  } finally {
    IOUtils.closeStream(elis);
  }
}
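The finally block with IOUtils.closeStream also swallows any exception thrown on close. On Java 7 and later the same cleanup can be written with try-with-resources; a sketch, assuming EditLogInputStream implements Closeable (it declares a close() method), with the caveat that a failure in close() would then propagate instead of being swallowed:

public static EnumMap<FSEditLogOpCodes, Holder<Integer>> countEditLogOpTypes(
    File editLog) throws Exception {
  // try-with-resources closes elis automatically, even on error paths.
  try (EditLogInputStream elis = new EditLogFileInputStream(editLog)) {
    return countEditLogOpTypes(elis);
  }
}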
 
Example #13
Source File: FSEditLogLoader.java    From big-c with Apache License 2.0
private void incrOpCount(FSEditLogOpCodes opCode,
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts, Step step,
    Counter counter) {
  Holder<Integer> holder = opCounts.get(opCode);
  if (holder == null) {
    holder = new Holder<Integer>(1);
    opCounts.put(opCode, holder);
  } else {
    holder.held++;
  }
  counter.increment();
}
 
Example #14
Source File: FSEditLogLoader.java    From big-c with Apache License 2.0
private static void dumpOpCounts(
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
  StringBuilder sb = new StringBuilder();
  sb.append("Summary of operations loaded from edit log:\n  ");
  Joiner.on("\n  ").withKeyValueSeparator("=").appendTo(sb, opCounts);
  FSImage.LOG.debug(sb.toString());
}
 
Example #15
Source File: FSImageTestUtil.java    From hadoop with Apache License 2.0
/**
 * @param editLog a path of an edit log file
 * @return the count of each type of operation in the log file
 * @throws Exception if there is an error reading it
 */
public static EnumMap<FSEditLogOpCodes,Holder<Integer>> countEditLogOpTypes(
    File editLog) throws Exception {
  EditLogInputStream elis = new EditLogFileInputStream(editLog);
  try {
    return countEditLogOpTypes(elis);
  } finally {
    IOUtils.closeStream(elis);
  }
}
 
Example #16
Source File: FSEditLogLoader.java    From hadoop with Apache License 2.0
private void incrOpCount(FSEditLogOpCodes opCode,
    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts, Step step,
    Counter counter) {
  Holder<Integer> holder = opCounts.get(opCode);
  if (holder == null) {
    holder = new Holder<Integer>(1);
    opCounts.put(opCode, holder);
  } else {
    holder.held++;
  }
  counter.increment();
}
 
Example #17
Source File: TestFileAppendRestart.java    From big-c with Apache License 2.0
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
    
    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    //        in addition to the ones above
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
    
    cluster.restartNameNode();
    
    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #18
Source File: TestQJMWithFaults.java    From big-c with Apache License 2.0
/**
 * Test case in which three JournalNodes randomly flip flop between
 * up and down states every time they get an RPC.
 * 
 * The writer keeps track of the latest ACKed edit, and on every
 * recovery operation, ensures that it recovers at least to that
 * point or higher. Since at any given point, a majority of JNs
 * may be injecting faults, any writer operation is allowed to fail,
 * so long as the exception message indicates it failed due to injected
 * faults.
 * 
 * Given a random seed, the test should be entirely deterministic.
 */
@Test
public void testRandomized() throws Exception {
  long seed;
  Long userSpecifiedSeed = Long.getLong(RAND_SEED_PROPERTY);
  if (userSpecifiedSeed != null) {
    LOG.info("Using seed specified in system property");
    seed = userSpecifiedSeed;
    
    // If the user specifies a seed, then we should gather all the
    // IPC trace information so that debugging is easier. This makes
    // the test run about 25% slower, so it is only done in this case.
    ((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
  } else {
    seed = new Random().nextLong();
  }
  LOG.info("Random seed: " + seed);
  
  Random r = new Random(seed);
  
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
    .build();
  
  // Format the cluster using a non-faulty QJM.
  QuorumJournalManager qjmForInitialFormat =
      createInjectableQJM(cluster);
  qjmForInitialFormat.format(FAKE_NSINFO);
  qjmForInitialFormat.close();
  
  try {
    long txid = 0;
    long lastAcked = 0;
    
    for (int i = 0; i < NUM_WRITER_ITERS; i++) {
      LOG.info("Starting writer " + i + "\n-------------------");
      
      QuorumJournalManager qjm = createRandomFaultyQJM(cluster, r);
      try {
        long recovered;
        try {
          recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        } catch (Throwable t) {
          LOG.info("Failed recovery", t);
          checkException(t);
          continue;
        }
        assertTrue("Recovered only up to txnid " + recovered +
            " but had gotten an ack for " + lastAcked,
            recovered >= lastAcked);
        
        txid = recovered + 1;
        
        // Periodically purge old data on disk so it's easier to look
        // at failure cases.
        if (txid > 100 && i % 10 == 1) {
          qjm.purgeLogsOlderThan(txid - 100);
        }

        Holder<Throwable> thrown = new Holder<Throwable>(null);
        for (int j = 0; j < SEGMENTS_PER_WRITER; j++) {
          lastAcked = writeSegmentUntilCrash(cluster, qjm, txid, 4, thrown);
          if (thrown.held != null) {
            LOG.info("Failed write", thrown.held);
            checkException(thrown.held);
            break;
          }
          txid += 4;
        }
      } finally {
        qjm.close();
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #19
Source File: FSEditLogLoader.java    From RDFS with Apache License 2.0
@SuppressWarnings("deprecation")
int loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit)
    throws IOException {
  FSDirectory fsDir = fsNamesys.dir;
  int numEdits = 0;

  EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
    new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);

  fsNamesys.writeLock();
  fsDir.writeLock();

  long recentOpcodeOffsets[] = new long[2];
  Arrays.fill(recentOpcodeOffsets, -1);

  try {
    try {
      FSEditLogOp op;
      while ((op = in.readOp()) != null) {
        if (logVersion <= FSConstants.STORED_TXIDS) {
          long diskTxid = op.txid;
          if (diskTxid != currentTxId) {
            if (fsNamesys.failOnTxIdMismatch()) {
              throw new IOException("The transaction id in the edit log : "
                  + diskTxid + " does not match the transaction id inferred"
                  + " from FSIMAGE : " + currentTxId);
            } else {
              FSNamesystem.LOG.error("The transaction id in the edit log : "
                  + diskTxid + " does not match the transaction id inferred"
                  + " from FSIMAGE : " + currentTxId +
                  ", continuing with transaction id : " + diskTxid);
              currentTxId = diskTxid;
            }
          }
        }
        
        loadEditRecord(logVersion, 
            in, 
            recentOpcodeOffsets, 
            opCounts, 
            fsNamesys,
            fsDir, 
            numEdits, 
            op);
        currentTxId++;
        numEdits++;
      }
    } finally {
      if(closeOnExit)
        in.close();
    }
  } catch (Throwable t) {
    // Catch Throwable because in the case of a truly corrupt edits log, any
    // sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
    StringBuilder sb = new StringBuilder();
    sb.append("Error replaying edit log at offset " + in.getPosition());
    if (recentOpcodeOffsets[0] != -1) {
      Arrays.sort(recentOpcodeOffsets);
      sb.append("\nRecent opcode offsets:");
      for (long offset : recentOpcodeOffsets) {
        if (offset != -1) {
          sb.append(' ').append(offset);
        }
      }
    }
    String errorMessage = sb.toString();
    FSImage.LOG.error(errorMessage);
    throw new IOException(errorMessage, t);
  } finally {
    fsDir.writeUnlock();
    fsNamesys.writeUnlock();
  }
  if (FSImage.LOG.isDebugEnabled()) {
    dumpOpCounts(opCounts);
  }
  return numEdits;
}
 
Example #20
Source File: TestQJMWithFaults.java    From hadoop with Apache License 2.0
/**
 * Test case in which three JournalNodes randomly flip flop between
 * up and down states every time they get an RPC.
 * 
 * The writer keeps track of the latest ACKed edit, and on every
 * recovery operation, ensures that it recovers at least to that
 * point or higher. Since at any given point, a majority of JNs
 * may be injecting faults, any writer operation is allowed to fail,
 * so long as the exception message indicates it failed due to injected
 * faults.
 * 
 * Given a random seed, the test should be entirely deterministic.
 */
@Test
public void testRandomized() throws Exception {
  long seed;
  Long userSpecifiedSeed = Long.getLong(RAND_SEED_PROPERTY);
  if (userSpecifiedSeed != null) {
    LOG.info("Using seed specified in system property");
    seed = userSpecifiedSeed;
    
    // If the user specifies a seed, then we should gather all the
    // IPC trace information so that debugging is easier. This makes
    // the test run about 25% slower, so it is only done in this case.
    ((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
  } else {
    seed = new Random().nextLong();
  }
  LOG.info("Random seed: " + seed);
  
  Random r = new Random(seed);
  
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
    .build();
  
  // Format the cluster using a non-faulty QJM.
  QuorumJournalManager qjmForInitialFormat =
      createInjectableQJM(cluster);
  qjmForInitialFormat.format(FAKE_NSINFO);
  qjmForInitialFormat.close();
  
  try {
    long txid = 0;
    long lastAcked = 0;
    
    for (int i = 0; i < NUM_WRITER_ITERS; i++) {
      LOG.info("Starting writer " + i + "\n-------------------");
      
      QuorumJournalManager qjm = createRandomFaultyQJM(cluster, r);
      try {
        long recovered;
        try {
          recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        } catch (Throwable t) {
          LOG.info("Failed recovery", t);
          checkException(t);
          continue;
        }
        assertTrue("Recovered only up to txnid " + recovered +
            " but had gotten an ack for " + lastAcked,
            recovered >= lastAcked);
        
        txid = recovered + 1;
        
        // Periodically purge old data on disk so it's easier to look
        // at failure cases.
        if (txid > 100 && i % 10 == 1) {
          qjm.purgeLogsOlderThan(txid - 100);
        }

        Holder<Throwable> thrown = new Holder<Throwable>(null);
        for (int j = 0; j < SEGMENTS_PER_WRITER; j++) {
          lastAcked = writeSegmentUntilCrash(cluster, qjm, txid, 4, thrown);
          if (thrown.held != null) {
            LOG.info("Failed write", thrown.held);
            checkException(thrown.held);
            break;
          }
          txid += 4;
        }
      } finally {
        qjm.close();
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #21
Source File: TestFileAppendRestart.java    From hadoop with Apache License 2.0
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
    
    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    //        in addition to the ones above
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
    
    cluster.restartNameNode();
    
    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) { cluster.shutdown(); }
  }
}