org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient. They are drawn from open-source projects; the source file and project are noted above each example.
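Before the individual examples, here is a minimal sketch of the two recurring patterns: building a SaslDataTransferClient from a Configuration, and using it to wrap an already-connected Peer. The class and method names in the sketch (SaslClientSketch, newSaslClient, secure) are illustrative placeholders; the constructor arguments and the peerSend call mirror Examples #1 and #2 below.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class SaslClientSketch {

  // Build the client the way Dispatcher and DFSClient do: configuration,
  // SASL properties resolver, trusted channel resolver, and a shared flag
  // recording whether the NameNode connection fell back to simple auth.
  static SaslDataTransferClient newSaslClient(Configuration conf,
      AtomicBoolean fallbackToSimpleAuth) {
    return new SaslDataTransferClient(conf,
        DataTransferSaslUtil.getSaslPropertiesResolver(conf),
        TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuth);
  }

  // Wrap a connected Peer with the SASL handshake, as in Example #2.
  // Obtaining the peer, key factory, block token and datanode ID is
  // application-specific and not shown here.
  static Peer secure(SaslDataTransferClient saslClient, Peer peer,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
    return saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
  }
}
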
Example #1
Source File: Dispatcher.java    From hadoop with Apache License 2.0
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
 
Example #2
Source File: TcpPeerServer.java    From hadoop with Apache License 2.0
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
 
Example #3
Source File: DFSClient.java    From hadoop with Apache License 2.0
/** 
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for 
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode 
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
  throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();
  
  this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
      DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }
  
  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null,
        "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
    conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
    Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
    Joiner.on(',').join(localInterfaceAddrs) + "]");
  }
  
  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
    conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
    TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
 
Example #4
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
 
Example #5
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java    From hbase with Apache License 2.0
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Void> saslPromise) throws IOException {
  SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
  SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
  TrustedChannelResolver trustedChannelResolver =
      SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
  AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
  InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
  if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
    saslPromise.trySuccess(null);
    return;
  }
  DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
  if (encryptionKey != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
      encryptionKeyToPassword(encryptionKey.encryptionKey),
      createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise,
        client);
  } else if (!UserGroupInformation.isSecurityEnabled()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
          + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (dnInfo.getXferPort() < 1024) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (saslPropsResolver != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
      buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise,
        client);
  } else {
    // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
    // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
    // edge case.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
          + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  }
}
 
Example #6
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
 
Example #7
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java    From hbase with Apache License 2.0
TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient); 
Example #8
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java    From hbase with Apache License 2.0
SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient); 
Example #9
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java    From hbase with Apache License 2.0
AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);
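 
The three one-line examples above are the methods of the reflection-based adaptor through which Example #5 reads the internals of a SaslDataTransferClient (the SASL_ADAPTOR constant). A hedged reconstruction of how the declarations fit together, assuming an interface named SaslAdaptor:

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.security.SaslPropertiesResolver;

// Assumed shape of the adaptor: it exposes the resolvers and the fallback
// flag held privately by a SaslDataTransferClient so that the handshake in
// Example #5 can be driven outside the HDFS client classes.
interface SaslAdaptor {
  TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);
  SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient);
  AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);
}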