org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.
Each example is taken from an open-source project; the source file, originating project, and license are noted above the code.
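Most of the examples below follow the same pattern: a BlockTokenIdentifier is serialized into the identifier bytes of a Token<BlockTokenIdentifier>, shipped to a DataNode alongside a data-transfer op, and deserialized there for an access check. The following minimal sketch illustrates that round trip. It assumes the four-argument BlockTokenIdentifier constructor from the Hadoop 2.x line these examples come from, and it uses a placeholder password; in a real cluster both the token and its password come from a BlockTokenSecretManager, not from hand-built values like these.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class BlockTokenRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Build an identifier for user "testUser" on block 12345 of pool "bp12"
    // with READ access (assumed Hadoop 2.x constructor signature).
    BlockTokenIdentifier id = new BlockTokenIdentifier("testUser", "bp12",
        12345L, EnumSet.of(BlockTokenSecretManager.AccessMode.READ));

    // Wrap the serialized identifier in a token. The password here is a
    // placeholder; a real one is computed by a BlockTokenSecretManager.
    Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
        id.getBytes(), "placeholder".getBytes(), id.getKind(),
        new Text("service"));

    // Deserialize the identifier back out of the token, the way
    // DataNode.checkBlockToken does in Example #20 below.
    BlockTokenIdentifier decoded = new BlockTokenIdentifier();
    decoded.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    System.out.println("Got: " + decoded);
  }
}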
Example #1
Source File: DFSTestUtil.java From big-c with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Example #2
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName,
          blockToken))
      .setOffset(blockOffset)
      .setLen(length)
      .setSendChecksums(sendChecksum)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .build();

  send(out, Op.READ_BLOCK, proto);
}
Example #3
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example #4
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example #5
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
    throws IOException {
  OpRequestShortCircuitAccessProto.Builder builder =
      OpRequestShortCircuitAccessProto.newBuilder()
          .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
          .setMaxVersion(maxVersion);
  if (slotId != null) {
    builder.setSlotId(PBHelper.convert(slotId));
  }
  builder.setSupportsReceiptVerification(supportsReceiptVerification);
  OpRequestShortCircuitAccessProto proto = builder.build();
  send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto);
}
Example #6
Source File: DFSTestUtil.java From hadoop with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Example #7
Source File: DataXceiver.java From big-c with Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Example #8
Source File: TcpPeerServer.java From big-c with Apache License 2.0
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Example #9
Source File: Sender.java From hadoop with Apache License 2.0
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName,
          blockToken))
      .setOffset(blockOffset)
      .setLen(length)
      .setSendChecksums(sendChecksum)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .build();

  send(out, Op.READ_BLOCK, proto);
}
Example #10
Source File: DataNode.java From hadoop with Apache License 2.0
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
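Example #10 only succeeds when the calling user's UserGroupInformation carries exactly one BlockTokenIdentifier. As a hedged illustration of how a token gets there on the client side, the fragment below attaches a token (assumed to come from LocatedBlock.getBlockToken(), as in Example #23) to a UGI before issuing calls:

// Hedged sketch: attach a block token to a UGI so that server-side checks
// like checkReadAccess above can find it among the caller's token
// identifiers. 'token' is assumed to be obtained elsewhere, e.g. from
// LocatedBlock.getBlockToken().
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("testUser");
ugi.addToken(token);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
  @Override
  public Void run() throws Exception {
    // Calls made here execute as 'ugi' and carry the block token.
    return null;
  }
});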
Example #11
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(
          blk, clientName, blockToken))
      .addAllTargets(PBHelper.convert(targets))
      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
      .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
Example #12
Source File: SaslDataTransferClient.java From big-c with Apache License 2.0
/**
 * Checks if an address is already trusted and then sends client SASL
 * negotiation if required.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param encryptionKeyFactory for creation of an encryption key
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair checkTrustAndSend(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    DataEncryptionKeyFactory encryptionKeyFactory,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  if (!trustedChannelResolver.isTrusted() &&
      !trustedChannelResolver.isTrusted(addr)) {
    // The encryption key factory only returns a key if encryption is enabled.
    DataEncryptionKey encryptionKey =
        encryptionKeyFactory.newDataEncryptionKey();
    return send(addr, underlyingOut, underlyingIn, encryptionKey,
        accessToken, datanodeId);
  } else {
    LOG.debug(
        "SASL client skipping handshake on trusted connection for addr = {}, "
        + "datanodeId = {}", addr, datanodeId);
    return null;
  }
}
Example #13
Source File: DataXceiver.java From hadoop with Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Example #14
Source File: TestPBHelper.java From big-c with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example #15
Source File: Sender.java From hadoop with Apache License 2.0
@Override
public void blockChecksum(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
      .build();

  send(out, Op.BLOCK_CHECKSUM, proto);
}
Example #16
Source File: SaslDataTransferClient.java From hadoop with Apache License 2.0
/**
 * Sends client SASL negotiation for general-purpose handshake.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);

  String userName = buildUserName(accessToken);
  char[] password = buildClientPassword(accessToken);
  CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
      password);
  return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
      callbackHandler);
}
Example #17
Source File: ClientDatanodeProtocolTranslatorPB.java From big-c with Apache License 2.0
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
    long[] blockIds, List<Token<BlockTokenIdentifier>> tokens)
    throws IOException {
  List<TokenProto> tokensProtos = new ArrayList<TokenProto>(tokens.size());
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .setBlockPoolId(blockPoolId)
          .addAllBlockIds(Longs.asList(blockIds))
          .addAllTokens(tokensProtos)
          .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blockPoolId, blockIds, volumeIds,
      volumeIndexes);
}
Example #18
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void blockChecksum(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
      .build();

  send(out, Op.BLOCK_CHECKSUM, proto);
}
Example #19
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private void compare(Token<BlockTokenIdentifier> expected,
    Token<BlockTokenIdentifier> actual) {
  assertTrue(Arrays.equals(expected.getIdentifier(), actual.getIdentifier()));
  assertTrue(Arrays.equals(expected.getPassword(), actual.getPassword()));
  assertEquals(expected.getKind(), actual.getKind());
  assertEquals(expected.getService(), actual.getService());
}
Example #20
Source File: DataNode.java From big-c with Apache License 2.0
private void checkBlockToken(ExtendedBlock block,
    Token<BlockTokenIdentifier> token, AccessMode accessMode)
    throws IOException {
  if (isBlockTokenEnabled) {
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    id.readFields(in);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + id.toString());
    }
    blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
  }
}
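Where the identifier class is known, the manual stream handling above has a shorter equivalent: Token.decodeIdentifier() resolves the identifier class from the token's kind and performs the same deserialization. A hedged alternative form of the body above (note that decodeIdentifier() can return null if no identifier class is registered for the token's kind):

// Equivalent to the explicit readFields() above, using the convenience
// method that Example #25 also relies on.
BlockTokenIdentifier id = token.decodeIdentifier();
if (id != null) {
  blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
}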
Example #21
Source File: TestPBHelper.java From big-c with Apache License 2.0
private void compare(Token<BlockTokenIdentifier> expected,
    Token<BlockTokenIdentifier> actual) {
  assertTrue(Arrays.equals(expected.getIdentifier(), actual.getIdentifier()));
  assertTrue(Arrays.equals(expected.getPassword(), actual.getPassword()));
  assertEquals(expected.getKind(), actual.getKind());
  assertEquals(expected.getService(), actual.getService());
}
Example #22
Source File: Sender.java From hadoop with Apache License 2.0
@Override
public void copyBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  OpCopyBlockProto proto = OpCopyBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
      .build();

  send(out, Op.COPY_BLOCK, proto);
}
Example #23
Source File: TestShortCircuitLocalRead.java From big-c with Apache License 2.0
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy =
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example #24
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void replaceBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String delHint,
    final DatanodeInfo source) throws IOException {
  OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
      .setStorageType(PBHelper.convertStorageType(storageType))
      .setDelHint(delHint)
      .setSource(PBHelper.convertDatanodeInfo(source))
      .build();

  send(out, Op.REPLACE_BLOCK, proto);
}
Example #25
Source File: TestFailoverWithBlockTokensEnabled.java From hadoop with Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));

  DFSClient dfsClient = DFSClientAdapter.getDFSClient(
      (DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks)arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(),
                    token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem)fs, spyDfsClient);

  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
Example #26
Source File: DataTransferProtoUtil.java From hadoop with Apache License 2.0
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
      .setBlock(PBHelper.convert(blk))
      .setToken(PBHelper.convert(blockToken));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId())
        .setParentId(s.getSpanId()));
  }
  return builder.build();
}
Example #27
Source File: SaslDataTransferClient.java From big-c with Apache License 2.0
/**
 * Sends client SASL negotiation for general-purpose handshake.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);

  String userName = buildUserName(accessToken);
  char[] password = buildClientPassword(accessToken);
  CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
      password);
  return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
      callbackHandler);
}
Example #28
Source File: DataNode.java From big-c with Apache License 2.0
FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> token, int maxVersion)
    throws ShortCircuitFdsUnsupportedException,
        ShortCircuitFdsVersionException, IOException {
  if (fileDescriptorPassingDisabledReason != null) {
    throw new ShortCircuitFdsUnsupportedException(
        fileDescriptorPassingDisabledReason);
  }
  checkBlockToken(blk, token, BlockTokenSecretManager.AccessMode.READ);
  int blkVersion = CURRENT_BLOCK_FORMAT_VERSION;
  if (maxVersion < blkVersion) {
    throw new ShortCircuitFdsVersionException("Your client is too old "
        + "to read this block! Its format version is " + blkVersion
        + ", but the highest format version you can read is " + maxVersion);
  }
  metrics.incrBlocksGetLocalPathInfo();
  FileInputStream fis[] = new FileInputStream[2];

  try {
    fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
    fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
  } catch (ClassCastException e) {
    LOG.debug("requestShortCircuitFdsForRead failed", e);
    throw new ShortCircuitFdsUnsupportedException("This DataNode's "
        + "FsDatasetSpi does not support short-circuit local reads");
  }
  return fis;
}
Example #29
Source File: KeyManager.java From hadoop with Apache License 2.0
/** Get an access token for a block. */
public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb)
    throws IOException {
  if (!isBlockTokenEnabled) {
    return BlockTokenSecretManager.DUMMY_TOKEN;
  } else {
    if (!shouldRun) {
      throw new IOException(
          "Cannot get access token since BlockKeyUpdater is not running");
    }
    return blockTokenSecretManager.generateToken(null, eb,
        EnumSet.of(AccessMode.REPLACE, AccessMode.COPY));
  }
}
Example #30
Source File: KeyManager.java From big-c with Apache License 2.0
/** Get an access token for a block. */
public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb)
    throws IOException {
  if (!isBlockTokenEnabled) {
    return BlockTokenSecretManager.DUMMY_TOKEN;
  } else {
    if (!shouldRun) {
      throw new IOException(
          "Cannot get access token since BlockKeyUpdater is not running");
    }
    return blockTokenSecretManager.generateToken(null, eb,
        EnumSet.of(AccessMode.REPLACE, AccessMode.COPY));
  }
}
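As a closing usage note, here is a hedged sketch of how a balancer-side caller would pair KeyManager.getAccessToken with the Sender ops shown earlier; keyManager, out, block, storageType, delHint, and source are assumed to already be in scope:

// Hedged sketch: fetch an access token for a block, then issue a
// REPLACE_BLOCK op carrying it (mirrors Sender.replaceBlock in Example #24).
Token<BlockTokenIdentifier> accessToken = keyManager.getAccessToken(block);
new Sender(out).replaceBlock(block, storageType, accessToken, delHint, source);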