Java Code Examples for org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier

The following examples show how to use org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier. A BlockTokenIdentifier is the identifier portion of an HDFS block access token (Token<BlockTokenIdentifier>), which a client presents to a DataNode to prove it is authorized to perform a given operation (READ, WRITE, COPY, or REPLACE) on a specific block. The examples are extracted from open source projects; the source project and file are noted above each example.
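Before the examples, here is a minimal, self-contained sketch of the identifier/token round trip that most of them rely on. This is illustrative glue code, not taken from any of the projects below: the class name, password, and service values are placeholders, and a real token would be issued by a BlockTokenSecretManager (see the KeyManager example) rather than built by hand.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class BlockTokenIdentifierDemo {
  public static void main(String[] args) throws Exception {
    // Wrap an identifier in a Token using the same constructor the
    // TestPBHelper examples use. The password is a placeholder, so this
    // token would never pass verification on a real DataNode.
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    id.setExpiryDate(System.currentTimeMillis() + 3600 * 1000L);
    Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
        id.getBytes(), "not-a-real-password".getBytes(),
        BlockTokenIdentifier.KIND_NAME, new Text("service"));

    // Recover the typed identifier from the token's serialized bytes,
    // mirroring DataNode.checkBlockToken in the final example.
    BlockTokenIdentifier decoded = new BlockTokenIdentifier();
    decoded.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    System.out.println("expiry = " + decoded.getExpiryDate());
  }
}
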
Example 1
Source Project: hadoop   Source File: Sender.java    License: Apache License 2.0
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {

  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
    .setOffset(blockOffset)
    .setLen(length)
    .setSendChecksums(sendChecksum)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .build();

  send(out, Op.READ_BLOCK, proto);
}
 
Example 2
Source Project: big-c   Source File: Sender.java    License: Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(
        blk, clientName, blockToken))
    .addAllTargets(PBHelper.convert(targets))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
    .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
 
Example 3
Source Project: big-c   Source File: Sender.java    License: Apache License 2.0
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
      throws IOException {
  OpRequestShortCircuitAccessProto.Builder builder =
      OpRequestShortCircuitAccessProto.newBuilder()
        .setHeader(DataTransferProtoUtil.buildBaseHeader(
          blk, blockToken)).setMaxVersion(maxVersion);
  if (slotId != null) {
    builder.setSlotId(PBHelper.convert(slotId));
  }
  builder.setSupportsReceiptVerification(supportsReceiptVerification);
  OpRequestShortCircuitAccessProto proto = builder.build();
  send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto);
}
 
Example 4
Source Project: hadoop   Source File: DataXceiver.java    License: Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 5
Source Project: big-c   Source File: SaslDataTransferClient.java    License: Apache License 2.0
/**
 * Checks if an address is already trusted and then sends client SASL
 * negotiation if required.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param encryptionKeyFactory for creation of an encryption key
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair checkTrustAndSend(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    DataEncryptionKeyFactory encryptionKeyFactory,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  if (!trustedChannelResolver.isTrusted() &&
      !trustedChannelResolver.isTrusted(addr)) {
    // The encryption key factory only returns a key if encryption is enabled.
    DataEncryptionKey encryptionKey =
      encryptionKeyFactory.newDataEncryptionKey();
    return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
      datanodeId);
  } else {
    LOG.debug(
      "SASL client skipping handshake on trusted connection for addr = {}, "
      + "datanodeId = {}", addr, datanodeId);
    return null;
  }
}
 
Example 6
Source Project: hadoop   Source File: DataNode.java    License: Apache License 2.0
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
 
Example 7
Source Project: big-c   Source File: DFSTestUtil.java    License: Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example 8
Source Project: hadoop   Source File: TestPBHelper.java    License: Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Example 9
Source Project: hadoop   Source File: TestPBHelper.java    License: Apache License 2.0
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Example 10
Source Project: big-c   Source File: TcpPeerServer.java    License: Apache License 2.0
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
 
Example 11
Source Project: hadoop   Source File: BlockReaderLocalLegacy.java    License: Apache License 2.0
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
    StorageType storageType) throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    // We cannot cache the path information for a replica on transient storage.
    // If the replica gets evicted, then it moves to a different path.  Then,
    // our next attempt to read from the cached path would fail to find the
    // file.  Additionally, the failure would cause us to disable legacy
    // short-circuit read for all subsequent use in the ClientContext.  Unlike
    // the newer short-circuit read implementation, we have no communication
    // channel for the DataNode to notify the client that the path has been
    // invalidated.  Therefore, our only option is to skip caching.
    if (pathinfo != null && !storageType.isTransient()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Example 12
Source Project: big-c   Source File: ClientDatanodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
    long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  List<TokenProto> tokensProtos = 
      new ArrayList<TokenProto>(tokens.size());
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request = 
      GetHdfsBlockLocationsRequestProto.newBuilder()
      .setBlockPoolId(blockPoolId)
      .addAllBlockIds(Longs.asList(blockIds))
      .addAllTokens(tokensProtos)
      .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blockPoolId, blockIds,
      volumeIds, volumeIndexes);
}
 
Example 13
Source Project: hadoop   Source File: SaslDataTransferClient.java    License: Apache License 2.0
/**
 * Sends client SASL negotiation for general-purpose handshake.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);

  String userName = buildUserName(accessToken);
  char[] password = buildClientPassword(accessToken);
  CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
    password);
  return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
    callbackHandler);
}
 
Example 14
Source Project: big-c   Source File: Sender.java    License: Apache License 2.0
@Override
public void blockChecksum(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
    .build();
  
  send(out, Op.BLOCK_CHECKSUM, proto);
}
 
Example 15
Source Project: hadoop   Source File: Sender.java    License: Apache License 2.0
@Override
public void replaceBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String delHint,
    final DatanodeInfo source) throws IOException {
  OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
    .setStorageType(PBHelper.convertStorageType(storageType))
    .setDelHint(delHint)
    .setSource(PBHelper.convertDatanodeInfo(source))
    .build();
  
  send(out, Op.REPLACE_BLOCK, proto);
}
 
Example 16
Source Project: hadoop   Source File: Sender.java    License: Apache License 2.0
@Override
public void copyBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  OpCopyBlockProto proto = OpCopyBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
    .build();
  
  send(out, Op.COPY_BLOCK, proto);
}
 
Example 17
Source Project: big-c   Source File: TestShortCircuitLocalRead.java    License: Apache License 2.0
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 18
Source Project: hadoop   Source File: DataTransferProtoUtil.java    License: Apache License 2.0
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  BaseHeaderProto.Builder builder =  BaseHeaderProto.newBuilder()
    .setBlock(PBHelper.convert(blk))
    .setToken(PBHelper.convert(blockToken));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId())
        .setParentId(s.getSpanId()));
  }
  return builder.build();
}
 
Example 19
Source Project: big-c   Source File: DataNode.java    License: Apache License 2.0
FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> token, int maxVersion) 
        throws ShortCircuitFdsUnsupportedException,
          ShortCircuitFdsVersionException, IOException {
  if (fileDescriptorPassingDisabledReason != null) {
    throw new ShortCircuitFdsUnsupportedException(
        fileDescriptorPassingDisabledReason);
  }
  checkBlockToken(blk, token, BlockTokenSecretManager.AccessMode.READ);
  int blkVersion = CURRENT_BLOCK_FORMAT_VERSION;
  if (maxVersion < blkVersion) {
    throw new ShortCircuitFdsVersionException("Your client is too old " +
      "to read this block!  Its format version is " + 
      blkVersion + ", but the highest format version you can read is " +
      maxVersion);
  }
  metrics.incrBlocksGetLocalPathInfo();
  FileInputStream[] fis = new FileInputStream[2];
  
  try {
    fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
    fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
  } catch (ClassCastException e) {
    LOG.debug("requestShortCircuitFdsForRead failed", e);
    throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
        "FsDatasetSpi does not support short-circuit local reads");
  }
  return fis;
}
 
Example 20
Source Project: hadoop   Source File: KeyManager.java    License: Apache License 2.0
/** Get an access token for a block. */
public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb
    ) throws IOException {
  if (!isBlockTokenEnabled) {
    return BlockTokenSecretManager.DUMMY_TOKEN;
  } else {
    if (!shouldRun) {
      throw new IOException(
          "Cannot get access token since BlockKeyUpdater is not running");
    }
    return blockTokenSecretManager.generateToken(null, eb,
        EnumSet.of(AccessMode.REPLACE, AccessMode.COPY));
  }
}
 
Example 21
Source Project: hadoop   Source File: TestFailoverWithBlockTokensEnabled.java    License: Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
  
  DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks)arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(), token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem)fs, spyDfsClient);
  
  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
 
Example 22
Source Project: big-c   Source File: DataNode.java    License: Apache License 2.0
private void checkBlockToken(ExtendedBlock block, Token<BlockTokenIdentifier> token,
    AccessMode accessMode) throws IOException {
  if (isBlockTokenEnabled) {
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    id.readFields(in);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + id.toString());
    }
    blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
  }
}
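 
Read together, the examples trace a single flow: the NameNode attaches a Token<BlockTokenIdentifier> to each LocatedBlock it hands out, the client forwards that token in the header of every data-transfer operation, and the DataNode verifies it (checkAccess / checkBlockToken above) before serving the request. The following condensed client-side sketch of that flow is hypothetical glue code, assuming a DataOutputStream already connected to a DataNode and a LocatedBlocks result from the NameNode, as in the test examples above:

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.security.token.Token;

public class ReadBlockSketch {
  // 'out' must already be connected to a DataNode hosting the block, and
  // 'lb' obtained from the NameNode, as in the DFSTestUtil and
  // TestShortCircuitLocalRead examples above.
  static void readFirstBlock(DataOutputStream out, LocatedBlocks lb,
      String clientName) throws IOException {
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();

    // The token travels in the op header; the DataXceiver on the DataNode
    // checks it (checkAccess) before streaming the block back.
    new Sender(out).readBlock(blk, token, clientName, 0L,
        blk.getNumBytes(), true, CachingStrategy.newDefaultStrategy());
    out.flush();
  }
}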