org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: TestOfflineEditsViewer.java From hadoop with Apache License 2.0 | 6 votes |
@SuppressWarnings("deprecation") private static ImmutableSet<FSEditLogOpCodes> skippedOps() { ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet.builder(); // Deprecated opcodes b.add(FSEditLogOpCodes.OP_DATANODE_ADD) .add(FSEditLogOpCodes.OP_DATANODE_REMOVE) .add(FSEditLogOpCodes.OP_SET_NS_QUOTA) .add(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA) .add(FSEditLogOpCodes.OP_SET_GENSTAMP_V1); // Cannot test delegation token related code in insecure set up b.add(FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN) .add(FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN) .add(FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN); // Skip invalid opcode b.add(FSEditLogOpCodes.OP_INVALID); return b.build(); }
Example #2
Source File: TestOfflineEditsViewer.java From big-c with Apache License 2.0 | 6 votes |
@SuppressWarnings("deprecation") private static ImmutableSet<FSEditLogOpCodes> skippedOps() { ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet.builder(); // Deprecated opcodes b.add(FSEditLogOpCodes.OP_DATANODE_ADD) .add(FSEditLogOpCodes.OP_DATANODE_REMOVE) .add(FSEditLogOpCodes.OP_SET_NS_QUOTA) .add(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA) .add(FSEditLogOpCodes.OP_SET_GENSTAMP_V1); // Cannot test delegation token related code in insecure set up b.add(FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN) .add(FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN) .add(FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN); // Skip invalid opcode b.add(FSEditLogOpCodes.OP_INVALID); return b.build(); }
Example #3
Source File: StatisticsEditsVisitor.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Get the statistics in string format, suitable for printing.
 *
 * @return statistics in string format, suitable for printing
 */
public String getStatisticsString() {
  // StringBuilder instead of StringBuffer: the buffer is method-local,
  // so no synchronization is needed.
  StringBuilder sb = new StringBuilder();
  sb.append(String.format(
      " %-30.30s : %d%n",
      "VERSION", version));
  for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
    // Opcodes never seen by this visitor have no entry in the map;
    // report them as 0 rather than passing a null count to "%d".
    Long count = opCodeCount.get(opCode);
    sb.append(String.format(
        " %-30.30s (%3d): %d%n",
        opCode.toString(),
        opCode.getOpCode(),
        count == null ? Long.valueOf(0L) : count));
  }
  return sb.toString();
}
Example #4
Source File: TestOfflineEditsViewer.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Compare two files, ignoring trailing padding at the end; for an edits
 * log the trailing padding does not make any difference. Returns false
 * (rather than throwing) if the files differ.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 * @return true if the files match up to the shorter file's length and the
 *         remainder of the longer file is all padding bytes
 * @throws IOException if either file cannot be loaded
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall,
    String filenameLarge) throws IOException {

  ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
  ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));

  // now correct if it's otherwise: swap so "small" is the shorter buffer
  if(small.capacity() > large.capacity()) {
    ByteBuffer tmpByteBuffer = small;
    small = large;
    large = tmpByteBuffer;
    String tmpFilename = filenameSmall;
    filenameSmall = filenameLarge;
    filenameLarge = tmpFilename;
  }

  // compare from 0 to capacity of small;
  // the rest of large must be padding (verified below)
  small.position(0);
  small.limit(small.capacity());
  large.position(0);
  large.limit(small.capacity());

  // compares position to limit
  if(!small.equals(large)) {
    return false;
  }

  // Everything after the limit should equal the OP_INVALID opcode byte
  // (0xFF per the original author's note).
  // NOTE(review): the method name and javadoc say "trailing zeros" but
  // the check is against OP_INVALID -- confirm which padding value the
  // edits log writer actually emits.
  int i = large.limit();
  large.clear();
  for(; i < large.capacity(); i++) {
    if(large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
      return false;
    }
  }
  return true;
}
Example #5
Source File: QJMTestUtil.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Verify that the given list of streams contains exactly the range of
 * transactions specified, inclusive.
 *
 * @param streams edit log input streams to read, in order
 * @param firstTxnId first expected transaction id, inclusive
 * @param lastTxnId last expected transaction id, inclusive
 * @throws IOException if reading any stream fails
 */
public static void verifyEdits(List<EditLogInputStream> streams,
    int firstTxnId, int lastTxnId) throws IOException {

  Iterator<EditLogInputStream> iter = streams.iterator();
  assertTrue(iter.hasNext());
  EditLogInputStream stream = iter.next();

  for (int expected = firstTxnId; expected <= lastTxnId; expected++) {
    FSEditLogOp op = stream.readOp();
    // A null op means the current stream is exhausted; advance to the
    // next stream until an op is found or the streams run out.
    while (op == null) {
      assertTrue("Expected to find txid " + expected + ", " +
          "but no more streams available to read from", iter.hasNext());
      stream = iter.next();
      op = stream.readOp();
    }

    // The test logs written by this utility contain only mkdir ops,
    // with consecutive transaction ids.
    assertEquals(FSEditLogOpCodes.OP_MKDIR, op.opCode);
    assertEquals(expected, op.getTransactionId());
  }

  // After lastTxnId there must be no more ops and no more streams.
  assertNull(stream.readOp());
  assertFalse("Expected no more txns after " + lastTxnId +
      " but more streams are available", iter.hasNext());
}
Example #6
Source File: QJMTestUtil.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Verify that the given list of streams contains exactly the range of
 * transactions specified, inclusive.
 *
 * @param streams edit log input streams to read, in order
 * @param firstTxnId first expected transaction id, inclusive
 * @param lastTxnId last expected transaction id, inclusive
 * @throws IOException if reading any stream fails
 */
public static void verifyEdits(List<EditLogInputStream> streams,
    int firstTxnId, int lastTxnId) throws IOException {

  Iterator<EditLogInputStream> iter = streams.iterator();
  assertTrue(iter.hasNext());
  EditLogInputStream stream = iter.next();

  for (int expected = firstTxnId; expected <= lastTxnId; expected++) {
    FSEditLogOp op = stream.readOp();
    // A null op means the current stream is exhausted; advance to the
    // next stream until an op is found or the streams run out.
    while (op == null) {
      assertTrue("Expected to find txid " + expected + ", " +
          "but no more streams available to read from", iter.hasNext());
      stream = iter.next();
      op = stream.readOp();
    }

    // The test logs written by this utility contain only mkdir ops,
    // with consecutive transaction ids.
    assertEquals(FSEditLogOpCodes.OP_MKDIR, op.opCode);
    assertEquals(expected, op.getTransactionId());
  }

  // After lastTxnId there must be no more ops and no more streams.
  assertNull(stream.readOp());
  assertFalse("Expected no more txns after " + lastTxnId +
      " but more streams are available", iter.hasNext());
}
Example #7
Source File: StatisticsEditsVisitor.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Increment the op code counter.
 *
 * @param opCode opCode for which to increment count
 */
private void incrementOpCodeCount(FSEditLogOpCodes opCode) {
  // Single lookup instead of containsKey + get + put: a missing entry
  // means a count of zero, so store count + 1 directly.
  Long count = opCodeCount.get(opCode);
  opCodeCount.put(opCode, count == null ? 1L : count + 1);
}
Example #8
Source File: StatisticsEditsVisitor.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Get the statistics in string format, suitable for printing.
 *
 * @return statistics in string format, suitable for printing
 */
public String getStatisticsString() {
  // StringBuilder instead of StringBuffer: the buffer is method-local,
  // so no synchronization is needed.
  StringBuilder sb = new StringBuilder();
  sb.append(String.format(
      " %-30.30s : %d%n",
      "VERSION", version));
  for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
    // Opcodes never seen by this visitor have no entry in the map;
    // report them as 0 rather than passing a null count to "%d".
    Long count = opCodeCount.get(opCode);
    sb.append(String.format(
        " %-30.30s (%3d): %d%n",
        opCode.toString(),
        opCode.getOpCode(),
        count == null ? Long.valueOf(0L) : count));
  }
  return sb.toString();
}
Example #9
Source File: StatisticsEditsVisitor.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Increment the op code counter.
 *
 * @param opCode opCode for which to increment count
 */
private void incrementOpCodeCount(FSEditLogOpCodes opCode) {
  // Single lookup instead of containsKey + get + put: a missing entry
  // means a count of zero, so store count + 1 directly.
  Long count = opCodeCount.get(opCode);
  opCodeCount.put(opCode, count == null ? 1L : count + 1);
}
Example #10
Source File: EditsLoaderCurrent.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Visit OP_ADD and OP_CLOSE; their record formats are almost the same.
 * Visit calls must stay in exactly this order -- each call consumes the
 * next field from the serialized record.
 *
 * @param editsOpCode op code being visited (OP_ADD or OP_CLOSE)
 * @throws IOException if the record is corrupted or a visit fails
 */
private void visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes editsOpCode)
  throws IOException {
  visitTxId();

  IntToken opAddLength = v.visitInt(EditsElement.LENGTH);
  // this happens if the edits is not properly ended (-1 op code):
  // it is padded at the end with all zeros, and OP_ADD is zero, so
  // without this check we would treat all zeros as an empty OP_ADD
  if(opAddLength.value == 0) {
    throw new IOException("OpCode " + editsOpCode +
      " has zero length (corrupted edits)");
  }
  v.visitStringUTF8(EditsElement.PATH);
  v.visitStringUTF8(EditsElement.REPLICATION);
  v.visitStringUTF8(EditsElement.MTIME);
  v.visitStringUTF8(EditsElement.ATIME);
  v.visitStringUTF8(EditsElement.BLOCKSIZE);
  // now read blocks
  IntToken numBlocksToken = v.visitInt(EditsElement.NUMBLOCKS);
  for (int i = 0; i < numBlocksToken.value; i++) {
    v.visitEnclosingElement(EditsElement.BLOCK);
    v.visitLong(EditsElement.BLOCK_ID);
    v.visitLong(EditsElement.BLOCK_NUM_BYTES);
    v.visitLong(EditsElement.BLOCK_GENERATION_STAMP);
    v.leaveEnclosingElement();
  }
  // PERMISSION_STATUS
  v.visitEnclosingElement(EditsElement.PERMISSION_STATUS);
  v.visitStringText( EditsElement.USERNAME);
  v.visitStringText( EditsElement.GROUPNAME);
  v.visitShort( EditsElement.FS_PERMISSIONS);
  v.leaveEnclosingElement();
  // Client name/machine fields are read only for OP_ADD -- this guard
  // is the sole difference between the two record formats here.
  if(editsOpCode == FSEditLogOpCodes.OP_ADD) {
    v.visitStringUTF8(EditsElement.CLIENT_NAME);
    v.visitStringUTF8(EditsElement.CLIENT_MACHINE);
  }
}
Example #11
Source File: EditsLoaderCurrent.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Visit OP_CLOSE. Delegates to the shared OP_ADD/OP_CLOSE handler, which
 * skips the trailing client name/machine fields for OP_CLOSE.
 *
 * @throws IOException if the record is corrupted or a visit fails
 */
private void visit_OP_CLOSE() throws IOException {
  visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes.OP_CLOSE);
}
Example #12
Source File: EditsLoaderCurrent.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Visit OP_ADD. Delegates to the shared OP_ADD/OP_CLOSE handler, which
 * additionally reads the client name/machine fields for OP_ADD.
 *
 * @throws IOException if the record is corrupted or a visit fails
 */
private void visit_OP_ADD() throws IOException {
  visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes.OP_ADD);
}
Example #13
Source File: TestOfflineEditsViewer.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Compare two files, ignoring trailing padding at the end; for an edits
 * log the trailing padding does not make any difference. Returns false
 * (rather than throwing) if the files differ.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 * @return true if the files match up to the shorter file's length and the
 *         remainder of the longer file is all padding bytes
 * @throws IOException if either file cannot be loaded
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall,
    String filenameLarge) throws IOException {

  ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
  ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
  // OEV outputs with the latest layout version, so tweak the old file's
  // contents to have latest version so checkedin binary files don't
  // require frequent updates
  // (byte 3 holds the layout version in these checked-in fixtures)
  small.put(3, (byte)NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

  // now correct if it's otherwise: swap so "small" is the shorter buffer
  if (small.capacity() > large.capacity()) {
    ByteBuffer tmpByteBuffer = small;
    small = large;
    large = tmpByteBuffer;
    String tmpFilename = filenameSmall;
    filenameSmall = filenameLarge;
    filenameLarge = tmpFilename;
  }

  // compare from 0 to capacity of small;
  // the rest of large must be padding (verified below)
  small.position(0);
  small.limit(small.capacity());
  large.position(0);
  large.limit(small.capacity());

  // compares position to limit
  if (!small.equals(large)) {
    return false;
  }

  // Everything after the limit should equal the OP_INVALID opcode byte
  // (0xFF per the original author's note).
  // NOTE(review): the method name and javadoc say "trailing zeros" but
  // the check is against OP_INVALID -- confirm which padding value the
  // edits log writer actually emits.
  int i = large.limit();
  large.clear();
  for (; i < large.capacity(); i++) {
    if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
      return false;
    }
  }
  return true;
}
Example #14
Source File: TestFileAppendRestart.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 *
 * @throws Exception if cluster setup or any file operation fails
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    // Inspect the in-progress edit log file directly to count op types.
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;

    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    // in addition to the ones above
    // (the counts below are cumulative over the whole edit log)
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    // Restart the NN and verify both files survive with expected lengths.
    cluster.restartNameNode();

    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #15
Source File: TestOfflineEditsViewer.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Compare two files, ignoring trailing padding at the end; for an edits
 * log the trailing padding does not make any difference. Returns false
 * (rather than throwing) if the files differ.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 * @return true if the files match up to the shorter file's length and the
 *         remainder of the longer file is all padding bytes
 * @throws IOException if either file cannot be loaded
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall,
    String filenameLarge) throws IOException {

  ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
  ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
  // OEV outputs with the latest layout version, so tweak the old file's
  // contents to have latest version so checkedin binary files don't
  // require frequent updates
  // (byte 3 holds the layout version in these checked-in fixtures)
  small.put(3, (byte)NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

  // now correct if it's otherwise: swap so "small" is the shorter buffer
  if (small.capacity() > large.capacity()) {
    ByteBuffer tmpByteBuffer = small;
    small = large;
    large = tmpByteBuffer;
    String tmpFilename = filenameSmall;
    filenameSmall = filenameLarge;
    filenameLarge = tmpFilename;
  }

  // compare from 0 to capacity of small;
  // the rest of large must be padding (verified below)
  small.position(0);
  small.limit(small.capacity());
  large.position(0);
  large.limit(small.capacity());

  // compares position to limit
  if (!small.equals(large)) {
    return false;
  }

  // Everything after the limit should equal the OP_INVALID opcode byte
  // (0xFF per the original author's note).
  // NOTE(review): the method name and javadoc say "trailing zeros" but
  // the check is against OP_INVALID -- confirm which padding value the
  // edits log writer actually emits.
  int i = large.limit();
  large.clear();
  for (; i < large.capacity(); i++) {
    if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
      return false;
    }
  }
  return true;
}
Example #16
Source File: TestFileAppendRestart.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 *
 * @throws Exception if cluster setup or any file operation fails
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    // Inspect the in-progress edit log file directly to count op types.
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;

    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    // in addition to the ones above
    // (the counts below are cumulative over the whole edit log)
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    // Restart the NN and verify both files survive with expected lengths.
    cluster.restartNameNode();

    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #17
Source File: EditsLoaderCurrent.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Dispatch one edit-log record to the visit method for its opcode.
 * The numeric comments are the on-disk opcode values noted by the
 * original author.
 *
 * @param editsOpCode opcode of the record to visit
 * @throws IOException if the opcode is unknown or a visit method fails
 */
private void visitOpCode(FSEditLogOpCodes editsOpCode)
  throws IOException {

  switch(editsOpCode) {
    case OP_INVALID: // -1
      visit_OP_INVALID();
      break;
    case OP_ADD: // 0
      visit_OP_ADD();
      break;
    case OP_CLOSE: // 9
      visit_OP_CLOSE();
      break;
    case OP_RENAME:
      visit_OP_RENAME_OLD();
      break;
    case OP_DELETE: // 2
      visit_OP_DELETE();
      break;
    case OP_MKDIR: // 3
      visit_OP_MKDIR();
      break;
    case OP_SET_REPLICATION: // 4
      visit_OP_SET_REPLICATION();
      break;
    case OP_SET_PERMISSIONS: // 7
      visit_OP_SET_PERMISSIONS();
      break;
    case OP_SET_OWNER: // 8
      visit_OP_SET_OWNER();
      break;
    case OP_SET_GENSTAMP: // 10
      visit_OP_SET_GENSTAMP();
      break;
    case OP_TIMES: // 13
      visit_OP_TIMES();
      break;
    case OP_SET_QUOTA: // 14
      visit_OP_SET_QUOTA();
      break;
    case OP_RENAME_15: // 15
      visit_OP_RENAME();
      break;
    case OP_CONCAT_DELETE: // 16
      visit_OP_CONCAT_DELETE();
      break;
    case OP_SYMLINK:
      visit_OP_SYMLINK();
      break;
    case OP_GET_DELEGATION_TOKEN:
      visit_OP_GET_DELEGATION_TOKEN();
      break;
    case OP_RENEW_DELEGATION_TOKEN:
      visit_OP_RENEW_DELEGATION_TOKEN();
      break;
    case OP_CANCEL_DELEGATION_TOKEN:
      visit_OP_CANCEL_DELEGATION_TOKEN();
      break;
    case OP_UPDATE_MASTER_KEY:
      visit_OP_UPDATE_MASTER_KEY();
      break;
    case OP_REASSIGN_LEASE:
      visit_OP_REASSIGN_LEASE();
      break;
    case OP_END_LOG_SEGMENT: // 23
      visit_OP_END_LOG_SEGMENT();
      break;
    case OP_START_LOG_SEGMENT: // 24
      visit_OP_BEGIN_LOG_SEGMENT();
      break;
    default: {
      // Fail loudly on opcodes this loader does not understand.
      throw new IOException("Unknown op code " + editsOpCode);
    }
  }
}
Example #18
Source File: EditsLoaderCurrent.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Loads edits file, uses visitor to process all elements. Records are
 * read until an OP_INVALID opcode (or EOF, which is treated as
 * OP_INVALID) terminates the loop.
 *
 * @throws IOException if the edits version is unsupported or any visit
 *         fails; the visitor is told to finish abnormally first
 */
@Override
public void loadEdits() throws IOException {

  try {
    v.start();
    v.visitEnclosingElement(EditsElement.EDITS);

    IntToken editsVersionToken = v.visitInt(EditsElement.EDITS_VERSION);
    editsVersion = editsVersionToken.value;
    if(!canLoadVersion(editsVersion)) {
      throw new IOException("Cannot process editLog version " +
        editsVersionToken.value);
    }

    FSEditLogOpCodes editsOpCode;
    do {
      v.visitEnclosingElement(EditsElement.RECORD);

      ByteToken opCodeToken;
      try {
        opCodeToken = v.visitByte(EditsElement.OPCODE);
      } catch (EOFException eof) {
        // Getting EOF when reading the opcode is fine --
        // it's just a finalized edits file.
        // Just fake the OP_INVALID here so the loop terminates normally.
        opCodeToken = new ByteToken(EditsElement.OPCODE);
        opCodeToken.fromByte(FSEditLogOpCodes.OP_INVALID.getOpCode());
        v.visit(opCodeToken);
      }
      editsOpCode = FSEditLogOpCodes.fromByte(opCodeToken.value);

      v.visitEnclosingElement(EditsElement.DATA);

      visitOpCode(editsOpCode);

      v.leaveEnclosingElement(); // DATA
      // Only valid records carry a checksum, and only on layout versions
      // that support it. ("EDITS_CHESKUM" [sic] is the constant's actual
      // spelling in the Feature enum -- do not "correct" it here.)
      if (editsOpCode != FSEditLogOpCodes.OP_INVALID &&
          LayoutVersion.supports(Feature.EDITS_CHESKUM, editsVersion)) {
        v.visitInt(EditsElement.CHECKSUM);
      }
      v.leaveEnclosingElement(); // RECORD
    } while(editsOpCode != FSEditLogOpCodes.OP_INVALID);

    v.leaveEnclosingElement(); // EDITS
    v.finish();
  } catch(IOException e) {
    // Tell the visitor to clean up, then re-throw the exception
    v.finishAbnormally();
    throw e;
  }
}
Example #19
Source File: TestDFSInotifyEventInputStream.java From big-c with Apache License 2.0 | 2 votes |
/**
 * If this test fails, check whether the newly added op should map to an
 * inotify event, and if so, establish the mapping in
 * {@link org.apache.hadoop.hdfs.server.namenode.InotifyFSEditLogOpTranslator}
 * and update testBasic() to include the new op.
 */
@Test
public void testOpcodeCount() {
  // Pin the known opcode count; a mismatch means an op was added or
  // removed and the inotify translation needs review.
  final int expectedOpCodes = 50;
  Assert.assertEquals(expectedOpCodes, FSEditLogOpCodes.values().length);
}
Example #20
Source File: StatisticsEditsVisitor.java From big-c with Apache License 2.0 | 2 votes |
/**
 * Get statistics.
 *
 * @return statistics, map of counts per opCode
 */
// NOTE(review): this exposes the internal mutable map directly, so
// callers can modify the visitor's state through it. Consider returning
// an unmodifiable view if no caller relies on mutating it.
public Map<FSEditLogOpCodes, Long> getStatistics() {
  return opCodeCount;
}
Example #21
Source File: TestDFSInotifyEventInputStream.java From hadoop with Apache License 2.0 | 2 votes |
/**
 * If this test fails, check whether the newly added op should map to an
 * inotify event, and if so, establish the mapping in
 * {@link org.apache.hadoop.hdfs.server.namenode.InotifyFSEditLogOpTranslator}
 * and update testBasic() to include the new op.
 */
@Test
public void testOpcodeCount() {
  // Pin the known opcode count; a mismatch means an op was added or
  // removed and the inotify translation needs review.
  final int expectedOpCodes = 50;
  Assert.assertEquals(expectedOpCodes, FSEditLogOpCodes.values().length);
}
Example #22
Source File: StatisticsEditsVisitor.java From hadoop with Apache License 2.0 | 2 votes |
/**
 * Get statistics.
 *
 * @return statistics, map of counts per opCode
 */
// NOTE(review): this exposes the internal mutable map directly, so
// callers can modify the visitor's state through it. Consider returning
// an unmodifiable view if no caller relies on mutating it.
public Map<FSEditLogOpCodes, Long> getStatistics() {
  return opCodeCount;
}