org.apache.hadoop.security.authorize.ServiceAuthorizationManager Java Examples

The following examples show how to use org.apache.hadoop.security.authorize.ServiceAuthorizationManager. Each example is taken from an open-source project; the source file, project, and license are noted above it.
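Before the project-specific examples, here is a minimal end-to-end sketch of the pattern most of them follow, written against the Hadoop 2.x instance API used in Examples #14-#17: enable hadoop.security.authorization, refresh the manager against a PolicyProvider, then ask it to authorize a user for a protocol. MyProtocol and the security.my.protocol.acl key are placeholders for illustration, not part of any real policy file.

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

public class ServiceAuthzSketch {
  // Placeholder protocol interface standing in for a real RPC protocol.
  interface MyProtocol { }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
    // ACL format is "user1,user2 group1,group2"; this key is a placeholder.
    conf.set("security.my.protocol.acl", "alice,bob users");

    // The PolicyProvider maps protocol classes to ACL configuration keys.
    ServiceAuthorizationManager manager = new ServiceAuthorizationManager();
    manager.refresh(conf, new PolicyProvider() {
      @Override
      public Service[] getServices() {
        return new Service[] { new Service("security.my.protocol.acl", MyProtocol.class) };
      }
    });

    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
      // Throws AuthorizationException when the ACL denies this user.
      manager.authorize(user, MyProtocol.class, conf, InetAddress.getLocalHost());
      System.out.println("authorized");
    } catch (AuthorizationException e) {
      System.err.println("denied: " + e.getMessage());
    }
  }
}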
Example #1
Source File: SimpleRpcServer.java    From hbase with Apache License 2.0
/** Starts the service.  Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  if (started) return;
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
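HBasePolicyProvider.init, shown in Example #14 below, is what populates the freshly constructed manager: it refreshes the manager from hbase-policy.xml, but only when hadoop.security.authorization is set in the configuration.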
 
Example #2
Source File: TestRPC.java    From RDFS with Apache License 2.0
public void testAuthorization() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
  
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);
  
  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
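The doRPCs helper is not shown on this page. In TestRPC of this era it follows roughly this shape: install the policy carried by the configuration, start a server, invoke a method through a client proxy, and assert that an authorization failure surfaces exactly when the second argument is true. A sketch under those assumptions; TestProtocol, TestImpl, TestPolicyProvider, and ADDRESS are the test's own fixtures, not shown here.

private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
  // Install the service-level policy carried by this configuration.
  SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));
  Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);
  server.start();
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestProtocol proxy = null;
  try {
    proxy = (TestProtocol) RPC.getProxy(
        TestProtocol.class, TestProtocol.versionID, addr, conf);
    proxy.ping();
    if (expectFailure) {
      fail("Expected the call to be rejected with an AuthorizationException");
    }
  } catch (RemoteException e) {
    if (!expectFailure) {
      throw e;
    }
    // The server-side AuthorizationException travels back as a RemoteException.
    assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}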
 
Example #3
Source File: NameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
  
  // set service-level authorization security policy
  if (serviceAuthEnabled = 
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
              HDFSPolicyProvider.class, PolicyProvider.class), 
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress(); 
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server   
  startTrashEmptier(conf);
}
 
Example #4
Source File: RPC.java    From hadoop-gpu with Apache License 2.0
@Override
public void authorize(Subject user, ConnectionHeader connection) 
throws AuthorizationException {
  if (authorize) {
    Class<?> protocol = null;
    try {
      protocol = getProtocolClass(connection.getProtocol(), getConf());
    } catch (ClassNotFoundException cfne) {
      throw new AuthorizationException("Unknown protocol: " + 
                                       connection.getProtocol());
    }
    ServiceAuthorizationManager.authorize(user, protocol);
  }
}
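Note the API shift visible across this page: in the 0.20-era hadoop-gpu and RDFS code here, ServiceAuthorizationManager.authorize is a static call driven by the global policy installed via SecurityUtil.setPolicy, while in the newer hbase and hadoop examples (#1, #13-#17) the manager is an instance owned by each RPC server and refreshed explicitly against a PolicyProvider.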
 
Example #5
Source File: RPC.java    From hadoop-gpu with Apache License 2.0
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind to and listen for connections on
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 */
public Server(Object instance, Configuration conf, String bindAddress,  int port,
              int numHandlers, boolean verbose) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, conf, classNameBase(instance.getClass().getName()));
  this.instance = instance;
  this.verbose = verbose;
  this.authorize = 
    conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    false);
}
 
Example #6
Source File: TestRPC.java    From hadoop-gpu with Apache License 2.0
public void testAuthorization() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
  
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);
  
  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
 
Example #7
Source File: TestCLI.java    From hadoop-gpu with Apache License 2.0
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  
  // Start up the mini MR cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
 
Example #8
Source File: JobTracker.java    From hadoop-gpu with Apache License 2.0
@Override
public void refreshServiceAcl() throws IOException {
  if (!conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  SecurityUtil.getPolicy().refresh();
}
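This is the server side of the admin refresh. In Hadoop of this vintage the method is declared on RefreshAuthorizationPolicyProtocol, and the stock mradmin tool reaches it over RPC roughly as sketched below (simplified; the real tool also passes UGI and socket-factory arguments):

// Trigger a server-side re-read of hadoop-policy.xml over RPC.
Configuration conf = new Configuration();
InetSocketAddress jtAddr = JobTracker.getAddress(conf);
RefreshAuthorizationPolicyProtocol refreshProtocol =
    (RefreshAuthorizationPolicyProtocol) RPC.getProxy(
        RefreshAuthorizationPolicyProtocol.class,
        RefreshAuthorizationPolicyProtocol.versionID, jtAddr, conf);
refreshProtocol.refreshServiceAcl();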
 
Example #9
Source File: RPC.java    From RDFS with Apache License 2.0
@Override
public void authorize(Subject user, ConnectionHeader connection) 
throws AuthorizationException {
  if (authorize) {
    Class<?> protocol = null;
    try {
      protocol = getProtocolClass(connection.getProtocol(), getConf());
    } catch (ClassNotFoundException cfne) {
      throw new AuthorizationException("Unknown protocol: " + 
                                       connection.getProtocol());
    }
    ServiceAuthorizationManager.authorize(user, protocol);
  }
}
 
Example #10
Source File: RPC.java    From RDFS with Apache License 2.0
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind to and listen for connections on
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 */
public Server(Object instance, Configuration conf, String bindAddress,  int port,
              int numHandlers, boolean verbose) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, conf, classNameBase(instance.getClass().getName()));
  this.instance = instance;
  this.verbose = verbose;
  this.authorize = 
    conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    false);
}
 
Example #11
Source File: TestCLI.java    From RDFS with Apache License 2.0
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  
  // Start up the mini MR cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
 
Example #12
Source File: JobTracker.java    From RDFS with Apache License 2.0
@Override
public void refreshServiceAcl() throws IOException {
  if (!conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  SecurityUtil.getPolicy().refresh();
}
 
Example #13
Source File: NettyRpcServer.java    From hbase with Apache License 2.0
@Override
public synchronized void start() {
  if (started) {
    return;
  }
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  scheduler.start();
  started = true;
}
 
Example #14
Source File: HBasePolicyProvider.java    From hbase with Apache License 2.0
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // set service-level authorization security policy
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    authManager.refresh(conf, new HBasePolicyProvider());
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
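refresh(conf, provider) re-reads the policy file named by the hadoop.policy.file system property (redirected to hbase-policy.xml above) and rebuilds one AccessControlList per Service the provider declares. Protocols with no entry in the policy file fall back to the wildcard ACL "*", which is why the tests in Examples #15-#17 expect "*" for every protocol except the one they changed.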
 
Example #15
Source File: TestRMAdminService.java    From big-c with Apache License 2.0
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    if (protocolClass == protocol) {
      Assert.assertEquals(accessList.getAclString(),
          aclString);
    } else {
      Assert.assertEquals(accessList.getAclString(), "*");
    }
  }
}
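The ACL strings compared here use the format "user1,user2 group1,group2": a comma-separated user list, then a space, then a comma-separated group list, with "*" standing for everyone. A small sketch of what a string like Example #17's aclsString grants; the user name is a placeholder:

// Users "alice" and "bob", plus anyone in group "users" or "wheel".
AccessControlList acl = new AccessControlList("alice,bob users,wheel");
UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
System.out.println(acl.isUserAllowed(alice));  // true
System.out.println(acl.getAclString());        // alice,bob users,wheel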
 
Example #16
Source File: TestRMAdminService.java    From hadoop with Apache License 2.0
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    if (protocolClass == protocol) {
      Assert.assertEquals(accessList.getAclString(),
          aclString);
    } else {
      Assert.assertEquals(accessList.getAclString(), "*");
    }
  }
}
 
Example #17
Source File: TestRMAdminService.java    From hadoop with Apache License 2.0
@Test
public void testServiceAclsRefreshWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  configuration.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  ResourceManager resourceManager = null;
  try {

    // upload default configurations
    uploadDefaultConfiguration();
    Configuration conf = new Configuration();
    conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    uploadConfiguration(conf, "core-site.xml");
    try {
      resourceManager = new ResourceManager();
      resourceManager.init(configuration);
      resourceManager.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    String aclsString = "alice,bob users,wheel";
    Configuration newConf = new Configuration();
    newConf.set("security.applicationclient.protocol.acl", aclsString);
    uploadConfiguration(newConf, "hadoop-policy.xml");

    resourceManager.adminService.refreshServiceAcls(RefreshServiceAclsRequest
        .newInstance());

    // verify service ACLs refresh for AdminService
    ServiceAuthorizationManager adminServiceServiceManager =
        resourceManager.adminService.getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(adminServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ClientRMService
    ServiceAuthorizationManager clientRMServiceServiceManager =
        resourceManager.getRMContext().getClientRMService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(clientRMServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ApplicationMasterService
    ServiceAuthorizationManager appMasterService =
        resourceManager.getRMContext().getApplicationMasterService()
            .getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(appMasterService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ResourceTrackerService
    ServiceAuthorizationManager RTService =
        resourceManager.getRMContext().getResourceTrackerService()
            .getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(RTService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
 
Example #18
Source File: NameNode.java    From RDFS with Apache License 2.0
/**
 * Initialize name-node.
 */
private void initialize() throws IOException {    
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class), 
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // Check that the port is free: create a socket, bind it, and throw an
  // exception if the port is busy. This has to be done before reading the
  // Namesystem, so that we fail fast instead of wasting time.
  InetSocketAddress clientSocket = NameNode.getAddress(getConf());
  ServerSocket socket = new ServerSocket();
  socket.bind(clientSocket);
  socket.close();
  InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(getConf());
  if (dnSocket != null) {
    socket = new ServerSocket();
    socket.bind(dnSocket);
    socket.close();
    //System.err.println("Tested " + dnSocket);
  }
  
  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);
  
  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get("dfs.cluster.name");
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
 
Example #19
Source File: DataNode.java    From RDFS with Apache License 2.0
private void initConfig(Configuration conf) throws IOException {
  if (conf.get("slave.host.name") != null) {
    machineName = conf.get("slave.host.name");   
  }
  if (machineName == null) {
    machineName = DNS.getDefaultHost(
                                   conf.get("dfs.datanode.dns.interface","default"),
                                   conf.get("dfs.datanode.dns.nameserver","default"));
  }
  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
    "dfs.datanode.artificialBlockReceivedDelay", 0);
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);
  
  /* Based on results on different platforms, we might need to set the
   * default to false on some of them. */
  this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
                                           true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean("dfs.datanode.read.ignore.checksum",
      false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
  
  this.deletedReportInterval =
    conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;
  
  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than or equal to "
        + "dfs.blockreport.intervalMsec."
        + " Setting initial delay to 0 msec.");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);
  
  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}
 
Example #20
Source File: Server.java    From big-c with Apache License 2.0
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public ServiceAuthorizationManager getServiceAuthorizationManager() {
  return serviceAuthorizationManager;
}
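Combined with the getters used in Examples #15-#17, this accessor gives a quick way to dump every ACL a running server is enforcing. A sketch, assuming server is an already-started org.apache.hadoop.ipc.Server:

ServiceAuthorizationManager manager = server.getServiceAuthorizationManager();
for (Class<?> protocol : manager.getProtocolsWithAcls()) {
  System.out.println(protocol.getName() + " -> "
      + manager.getProtocolsAcls(protocol).getAclString());
}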
 
Example #21
Source File: TestRMAdminService.java    From big-c with Apache License 2.0
@Test
public void testServiceAclsRefreshWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  configuration.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  ResourceManager resourceManager = null;
  try {

    // upload default configurations
    uploadDefaultConfiguration();
    Configuration conf = new Configuration();
    conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    uploadConfiguration(conf, "core-site.xml");
    try {
      resourceManager = new ResourceManager();
      resourceManager.init(configuration);
      resourceManager.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    String aclsString = "alice,bob users,wheel";
    Configuration newConf = new Configuration();
    newConf.set("security.applicationclient.protocol.acl", aclsString);
    uploadConfiguration(newConf, "hadoop-policy.xml");

    resourceManager.adminService.refreshServiceAcls(RefreshServiceAclsRequest
        .newInstance());

    // verify service ACLs refresh for AdminService
    ServiceAuthorizationManager adminServiceServiceManager =
        resourceManager.adminService.getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(adminServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ClientRMService
    ServiceAuthorizationManager clientRMServiceServiceManager =
        resourceManager.getRMContext().getClientRMService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(clientRMServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ApplicationMasterService
    ServiceAuthorizationManager appMasterService =
        resourceManager.getRMContext().getApplicationMasterService()
            .getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(appMasterService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);

    // verify service ACLs refresh for ResourceTrackerService
    ServiceAuthorizationManager RTService =
        resourceManager.getRMContext().getResourceTrackerService()
            .getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(RTService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
        aclsString);
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
 
Example #22
Source File: Server.java    From hadoop with Apache License 2.0
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public ServiceAuthorizationManager getServiceAuthorizationManager() {
  return serviceAuthorizationManager;
}