org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature.
The original project and source file are noted above each example.
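As the examples below show, the enum is almost always passed to NameNodeLayoutVersion.supports(feature, layoutVersion) to decide how data written under a given layout version should be decoded, and the layout version that introduced a Feature can be read via getInfo().getLayoutVersion(). The following is a minimal sketch of that pattern; the wrapper class and method names are hypothetical, and only the Hadoop calls that also appear in the examples are assumed to exist.

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

// Hypothetical helper illustrating the feature-gating pattern used in the
// examples: readers branch on NameNodeLayoutVersion.supports(...) before
// deciding how to decode records written under a given layout version.
class LayoutFeatureCheckSketch {

  // Returns true when the given layout version already includes snapshot support.
  static boolean supportsSnapshots(int layoutVersion) {
    return NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.SNAPSHOT, layoutVersion);
  }

  public static void main(String[] args) {
    // The layout version that introduced a feature is exposed via getInfo(),
    // as in the TestStartupOptionUpgrade examples below.
    int snapshotLv =
        LayoutVersion.Feature.SNAPSHOT.getInfo().getLayoutVersion();
    System.out.println("SNAPSHOT introduced at layout version " + snapshotLv);
    System.out.println("Supported at that version: "
        + supportsSnapshots(snapshotLv));
  }
}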
Example #1
Source File: FSImageFormat.java From big-c with Apache License 2.0
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support inode IDs, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
    if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
          RESERVED_ERROR_MSG);
      final String renameString = renameReservedMap
          .get(FSDirectory.DOT_RESERVED_STRING);
      component = DFSUtil.string2Bytes(renameString);
      LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
          + " to " + renameString);
    }
  }
  return component;
}
Example #2
Source File: ImageLoaderCurrent.java From hadoop with Apache License 2.0
private void processFileDiff(DataInputStream in, ImageVisitor v,
    String currentINodeName) throws IOException {
  int snapshotId = in.readInt();
  v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
      ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
  v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
  if (in.readBoolean()) {
    v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
    if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES,
        imageVersion)) {
      processINodeFileAttributes(in, v, currentINodeName);
    } else {
      processINode(in, v, true, currentINodeName, true);
    }
    v.leaveEnclosingElement();
  }
  v.leaveEnclosingElement();
}
Example #3
Source File: FSImageFormat.java From hadoop with Apache License 2.0
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support inode IDs, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
    if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
          RESERVED_ERROR_MSG);
      final String renameString = renameReservedMap
          .get(FSDirectory.DOT_RESERVED_STRING);
      component = DFSUtil.string2Bytes(renameString);
      LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
          + " to " + renameString);
    }
  }
  return component;
}
Example #4
Source File: FSImageFormat.java From hadoop with Apache License 2.0
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component = DFSUtil.string2Bytes(renameReservedMap
          .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
Example #5
Source File: FSImageFormat.java From big-c with Apache License 2.0
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();
  return new INodeFileAttributes.SnapshotCopy(name, permissions, null,
      modificationTime, accessTime, replication, preferredBlockSize,
      (byte) 0, null);
}
Example #6
Source File: FSImageFormat.java From big-c with Apache License 2.0
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image
  // viewer should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L
      ? new INodeDirectoryAttributes.SnapshotCopy(
          name, permissions, null, modificationTime, null)
      : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
          null, modificationTime, nsQuota, dsQuota, null, null);
}
Example #7
Source File: FSImageFormat.java From hadoop with Apache License 2.0
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image
  // viewer should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L
      ? new INodeDirectoryAttributes.SnapshotCopy(
          name, permissions, null, modificationTime, null)
      : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
          null, modificationTime, nsQuota, dsQuota, null, null);
}
Example #8
Source File: FSImageFormat.java From hadoop with Apache License 2.0
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();
  return new INodeFileAttributes.SnapshotCopy(name, permissions, null,
      modificationTime, accessTime, replication, preferredBlockSize,
      (byte) 0, null);
}
Example #9
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
Example #10
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Example #11
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Example #12
Source File: FSImageFormat.java From big-c with Apache License 2.0
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component = DFSUtil.string2Bytes(renameReservedMap
          .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
Example #13
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
@Override void readFields(DataInputStream in, int logVersion) throws IOException { if (!NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); if (this.length != 3) { throw new IOException("Incorrect data format. " + "Rename operation."); } } this.src = FSImageSerialization.readString(in); this.dst = FSImageSerialization.readString(in); if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.timestamp = FSImageSerialization.readLong(in); } else { this.timestamp = readLong(in); } this.options = readRenameOptions(in); // read RPC ids if necessary readRpcIds(in, logVersion); }
Example #14
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override void readFields(DataInputStream in, int logVersion) throws IOException { if (!NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); if (this.length != 3) { throw new IOException("Incorrect data format. " + "Old rename operation."); } } this.src = FSImageSerialization.readString(in); this.dst = FSImageSerialization.readString(in); if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.timestamp = FSImageSerialization.readLong(in); } else { this.timestamp = readLong(in); } // read RPC ids if necessary readRpcIds(in, logVersion); }
Example #15
Source File: FSEditLogOp.java From big-c with Apache License 2.0
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Example #16
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Example #17
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
@Override void readFields(DataInputStream in, int logVersion) throws IOException { if (!NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); if (this.length != 3) { throw new IOException("Incorrect data format. " + "Old rename operation."); } } this.src = FSImageSerialization.readString(in); this.dst = FSImageSerialization.readString(in); if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.timestamp = FSImageSerialization.readLong(in); } else { this.timestamp = readLong(in); } // read RPC ids if necessary readRpcIds(in, logVersion); }
Example #18
Source File: ImageLoaderCurrent.java From hadoop with Apache License 2.0
/**
 * Process the INode records stored in the fsimage.
 *
 * @param in Datastream to process
 * @param v Visitor to walk over INodes
 * @param numInodes Number of INodes stored in file
 * @param skipBlocks Process all the blocks within the INode?
 * @param supportSnapshot Whether or not the imageVersion supports snapshot
 * @throws VisitException
 * @throws IOException
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks, boolean supportSnapshot)
    throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);

  if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
      imageVersion)) {
    if (!supportSnapshot) {
      processLocalNameINodes(in, v, numInodes, skipBlocks);
    } else {
      processLocalNameINodesWithSnapshot(in, v, skipBlocks);
    }
  } else { // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  }

  v.leaveEnclosingElement(); // INodes
}
Example #19
Source File: ImageLoaderCurrent.java From big-c with Apache License 2.0
private void processFileDiff(DataInputStream in, ImageVisitor v,
    String currentINodeName) throws IOException {
  int snapshotId = in.readInt();
  v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
      ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
  v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
  if (in.readBoolean()) {
    v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
    if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES,
        imageVersion)) {
      processINodeFileAttributes(in, v, currentINodeName);
    } else {
      processINode(in, v, true, currentINodeName, true);
    }
    v.leaveEnclosingElement();
  }
  v.leaveEnclosingElement();
}
Example #20
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override void readFields(DataInputStream in, int logVersion) throws IOException { if (!NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); if (this.length != 3) { throw new IOException("Incorrect data format. " + "Rename operation."); } } this.src = FSImageSerialization.readString(in); this.dst = FSImageSerialization.readString(in); if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.timestamp = FSImageSerialization.readLong(in); } else { this.timestamp = readLong(in); } this.options = readRenameOptions(in); // read RPC ids if necessary readRpcIds(in, logVersion); }
Example #21
Source File: TestStartupOptionUpgrade.java From big-c with Apache License 2.0
/**
 * Tests the upgrade from one version of Federation to another Federation
 * version Test without clusterid case: -upgrade
 * Expected to reuse existing clusterid
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFromFederation() throws Exception {
  // Test assumes clusterid already exists, set the clusterid
  storage.setClusterID("currentcid");
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one",
      "currentcid", storage.getClusterID());
}
Example #22
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 4) {
      throw new IOException("Incorrect data format. "
          + "symlink operation.");
    }
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when the editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  this.value = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
  this.permissionStatus = PermissionStatus.read(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Example #23
Source File: TestLayoutVersion.java From big-c with Apache License 2.0
/**
 * Tests to make sure a given layout version supports all the
 * features from the ancestor
 */
@Test
public void testFeaturesFromAncestorSupported() {
  for (LayoutFeature f : Feature.values()) {
    validateFeatureList(f);
  }
}
Example #24
Source File: TestStartupOptionUpgrade.java From hadoop with Apache License 2.0
/**
 * Tests the upgrade from one version of Federation to another Federation
 * version Test with wrong clusterid case: -upgrade -clusterid <cid>
 * Expected to reuse existing clusterid and ignore user given clusterid
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFromFederationWithWrongCID()
    throws Exception {
  startOpt.setClusterId("wrong-cid");
  storage.setClusterID("currentcid");
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one",
      "currentcid", storage.getClusterID());
}
Example #25
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.replication = FSImageSerialization.readShort(in);
  } else {
    this.replication = readShort(in);
  }
}
Example #26
Source File: FSEditLogOp.java From big-c with Apache License 2.0
private static List<XAttr> readXAttrsFromEditLog(DataInputStream in,
    int logVersion) throws IOException {
  if (!NameNodeLayoutVersion.supports(NameNodeLayoutVersion.Feature.XATTRS,
      logVersion)) {
    return null;
  }
  XAttrEditLogProto proto = XAttrEditLogProto.parseDelimitedFrom(in);
  return PBHelper.convertXAttrs(proto.getXAttrsList());
}
Example #27
Source File: FSEditLogOp.java From big-c with Apache License 2.0
private static List<AclEntry> read(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(Feature.EXTENDED_ACL, logVersion)) {
    return null;
  }

  int size = in.readInt();
  if (size == 0) {
    return null;
  }

  List<AclEntry> aclEntries = Lists.newArrayListWithCapacity(size);
  for (int i = 0; i < size; ++i) {
    int v = in.read();
    int p = v & ACL_EDITLOG_PERM_MASK;
    int t = (v >> ACL_EDITLOG_ENTRY_TYPE_OFFSET)
        & ACL_EDITLOG_ENTRY_TYPE_MASK;
    int s = (v >> ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
        & ACL_EDITLOG_ENTRY_SCOPE_MASK;
    boolean hasName = ((v >> ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET) & 1) == 1;
    String name = hasName ? FSImageSerialization.readString(in) : null;
    aclEntries.add(new AclEntry.Builder().setName(name)
        .setPermission(FSACTION_VALUES[p])
        .setScope(ACL_ENTRY_SCOPE_VALUES[s])
        .setType(ACL_ENTRY_TYPE_VALUES[t]).build());
  }
  return aclEntries;
}
Example #28
Source File: FSEditLogOp.java From big-c with Apache License 2.0
void readRpcIds(DataInputStream in, int logVersion)
    throws IOException {
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_SUPPORT_RETRYCACHE, logVersion)) {
    this.rpcClientId = FSImageSerialization.readBytes(in);
    this.rpcCallId = FSImageSerialization.readInt(in);
  }
}
Example #29
Source File: TestStartupOptionUpgrade.java From hadoop with Apache License 2.0
/**
 * Tests the upgrade from one version of Federation to another Federation
 * version Test with correct clusterid case: -upgrade -clusterid <cid>
 * Expected to reuse existing clusterid and ignore user given clusterid
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFromFederationWithCID()
    throws Exception {
  startOpt.setClusterId("currentcid");
  storage.setClusterID("currentcid");
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one",
      "currentcid", storage.getClusterID());
}
Example #30
Source File: TestStartupOptionUpgrade.java From hadoop with Apache License 2.0
/**
 * Tests the upgrade from one version of Federation to another Federation
 * version Test without clusterid case: -upgrade
 * Expected to reuse existing clusterid
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFromFederation() throws Exception {
  // Test assumes clusterid already exists, set the clusterid
  storage.setClusterID("currentcid");
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one",
      "currentcid", storage.getClusterID());
}