org.apache.hadoop.security.authorize.PolicyProvider Java Examples

The following examples show how to use org.apache.hadoop.security.authorize.PolicyProvider. They are drawn from several open-source projects; each example lists its source file, originating project, and license.
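Before the project-specific examples, here is a minimal sketch of a custom PolicyProvider and of how one is typically registered through PolicyProvider.POLICY_PROVIDER_CONFIG. The protocol interface and the ACL configuration key used here are hypothetical placeholders, not taken from any of the projects below.

import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class MyPolicyProvider extends PolicyProvider {

  // Hypothetical RPC protocol interface guarded by the ACL below.
  public interface MyProtocol {
  }

  @Override
  public Service[] getServices() {
    // Each Service maps an ACL configuration key (assumed name here)
    // to the protocol interface it protects.
    return new Service[] {
        new Service("security.my.protocol.acl", MyProtocol.class)
    };
  }

  // Registration mirrors the test setups below, e.g.:
  //   conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
  //       MyPolicyProvider.class, PolicyProvider.class);
}

A server that supports service-level authorization can then enforce these ACLs via refreshServiceAcl(conf, provider) when hadoop.security.authorization is enabled, as Examples #4, #5, #12 and #17 demonstrate.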
Example #1
Source File: TestXAttrCLI.java    From big-c with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
 
Example #2
Source File: TestCacheAdminCLI.java    From big-c with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Example #3
Source File: TestCryptoAdminCLI.java    From big-c with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
 
Example #4
Source File: ZKFCRpcServer.java    From big-c with Apache License 2.0
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;
  
  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();
  
  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
 
Example #5
Source File: ZKFCRpcServer.java    From hadoop with Apache License 2.0
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;
  
  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();
  
  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
 
Example #6
Source File: TestCryptoAdminCLI.java    From hadoop with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
 
Example #7
Source File: TestCacheAdminCLI.java    From hadoop with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Example #8
Source File: TestXAttrCLI.java    From hadoop with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
 
Example #9
Source File: NameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
  
  // set service-level authorization security policy
  if (serviceAuthEnabled = 
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
              HDFSPolicyProvider.class, PolicyProvider.class), 
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress(); 
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server   
  startTrashEmptier(conf);
}
 
Example #10
Source File: AdminService.java    From big-c with Apache License 2.0
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException, IOException {
  if (!getConfig().getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  String argName = "refreshServiceAcls";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh Service ACLs.");

  PolicyProvider policyProvider = RMPolicyProvider.getInstance();
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);

  refreshServiceAcls(conf, policyProvider);
  rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
  rmContext.getApplicationMasterService().refreshServiceAcls(
      conf, policyProvider);
  rmContext.getResourceTrackerService().refreshServiceAcls(
      conf, policyProvider);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
 
Example #11
Source File: TestHDFSCLI.java    From big-c with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  
  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Example #12
Source File: StreamingContainerParent.java    From attic-apex-core with Apache License 2.0
protected void startRpcServer()
{
  Configuration conf = getConfig();
  LOG.info("Config: " + conf);
  LOG.info("Listener thread count " + listenerThreadCount);
  try {
    server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class).setInstance(this)
        .setBindAddress("0.0.0.0").setPort(0).setNumHandlers(listenerThreadCount).setSecretManager(tokenSecretManager)
        .setVerbose(false).build();

    // Enable service authorization?
    if (conf.getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      //refreshServiceAcls(conf, new MRAMPolicyProvider());
      server.refreshServiceAcl(conf, new PolicyProvider()
      {

        @Override
        public Service[] getServices()
        {
          return (new Service[]{
              new Service(StreamingContainerUmbilicalProtocol.class
                  .getName(), StreamingContainerUmbilicalProtocol.class)
          });
        }

      });
    }

    server.start();
    this.address = NetUtils.getConnectAddress(server);
    LOG.info("Container callback server listening at " + this.address);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Example #13
Source File: TestHDFSCLI.java    From hadoop with Apache License 2.0
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  
  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
 
Example #14
Source File: TestTokenAuthentication.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  // Override the connection registry to avoid spinning up a mini cluster for the connection below
  // to go through.
  TEST_UTIL.getConfiguration().set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
      HConstants.ZK_CONNECTION_REGISTRY_CLASS);
  TEST_UTIL.startMiniZKCluster();
  // register token type for protocol
  SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
    new SecurityInfo("hbase.test.kerberos.principal",
      AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  // security settings only added after startup so that ZK does not require SASL
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);
  conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl);
  server = new TokenServer(conf, TEST_UTIL);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread, "TokenServer:"+server.getServerName().toString());
  // wait for startup
  while (!server.isStarted() && !server.isStopped()) {
    Thread.sleep(10);
  }
  server.rpcServer.refreshAuthManager(conf, new PolicyProvider() {
    @Override
    public Service[] getServices() {
      return new Service [] {
        new Service("security.client.protocol.acl",
          AuthenticationProtos.AuthenticationService.BlockingInterface.class)};
    }
  });
  ZKClusterId.setClusterId(server.getZooKeeper(), clusterId);
  secretManager = (AuthenticationTokenSecretManager)server.getSecretManager();
  while(secretManager.getCurrentKey() == null) {
    Thread.sleep(1);
  }
}
 
Example #15
Source File: TestCLI.java    From RDFS with Apache License 2.0
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  
   // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
 
Example #16
Source File: RpcServer.java    From hbase with Apache License 2.0
@Override
public synchronized void refreshAuthManager(Configuration conf, PolicyProvider pp) {
  // Ignore warnings that this should be accessed in a static way instead of via an instance;
  // it'll break if you go via static route.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  this.authManager.refresh(conf, pp);
  LOG.info("Refreshed hbase-policy.xml successfully");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  LOG.info("Refreshed super and proxy users successfully");
}
 
Example #17
Source File: StreamingContainerParent.java    From Bats with Apache License 2.0
protected void startRpcServer()
{
  Configuration conf = getConfig();
  LOG.info("Config: " + conf);
  LOG.info("Listener thread count " + listenerThreadCount);
  try {
    server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class).setInstance(this)
        .setBindAddress("0.0.0.0").setPort(0).setNumHandlers(listenerThreadCount).setSecretManager(tokenSecretManager)
        .setVerbose(false).build();

    // Enable service authorization?
    if (conf.getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      //refreshServiceAcls(conf, new MRAMPolicyProvider());
      server.refreshServiceAcl(conf, new PolicyProvider()
      {

        @Override
        public Service[] getServices()
        {
          return (new Service[]{
              new Service(StreamingContainerUmbilicalProtocol.class
                  .getName(), StreamingContainerUmbilicalProtocol.class)
          });
        }

      });
    }

    server.start();
    this.address = NetUtils.getConnectAddress(server);
    LOG.info("Container callback server listening at " + this.address);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Example #18
Source File: AdminService.java    From hadoop with Apache License 2.0
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException, IOException {
  if (!getConfig().getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  String argName = "refreshServiceAcls";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh Service ACLs.");

  PolicyProvider policyProvider = RMPolicyProvider.getInstance();
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);

  refreshServiceAcls(conf, policyProvider);
  rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
  rmContext.getApplicationMasterService().refreshServiceAcls(
      conf, policyProvider);
  rmContext.getResourceTrackerService().refreshServiceAcls(
      conf, policyProvider);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
 
Example #19
Source File: TestCLI.java    From hadoop-gpu with Apache License 2.0
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  
   // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
 
Example #20
Source File: DFSZKFailoverController.java    From big-c with Apache License 2.0
@Override
protected PolicyProvider getPolicyProvider() {
  return new HDFSPolicyProvider();
}
 
Example #21
Source File: AdminService.java    From big-c with Apache License 2.0
private synchronized void refreshServiceAcls(Configuration configuration,
    PolicyProvider policyProvider) {
  this.server.refreshServiceAclWithLoadedConfiguration(configuration,
      policyProvider);
}
 
Example #22
Source File: TezTaskCommunicatorImpl.java    From tez with Apache License 2.0
private void refreshServiceAcls(Configuration configuration,
                                PolicyProvider policyProvider) {
  this.server.refreshServiceAcl(configuration, policyProvider);
}
 
Example #23
Source File: DAGClientServer.java    From tez with Apache License 2.0
private void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
  this.server.refreshServiceAcl(configuration, policyProvider);
}
 
Example #24
Source File: TaskAttemptListenerImpTezDag.java    From incubator-tez with Apache License 2.0
void refreshServiceAcls(Configuration configuration,
    PolicyProvider policyProvider) {
  this.server.refreshServiceAcl(configuration, policyProvider);
}
 
Example #25
Source File: Server.java    From big-c with Apache License 2.0
/**
 * Refresh the service authorization ACL for the service handled by this server.
 */
public void refreshServiceAcl(Configuration conf, PolicyProvider provider) {
  serviceAuthorizationManager.refresh(conf, provider);
}
 
Example #26
Source File: Server.java    From big-c with Apache License 2.0
/**
 * Refresh the service authorization ACL for the service handled by this server
 * using the specified Configuration.
 */
@Private
public void refreshServiceAclWithLoadedConfiguration(Configuration conf,
    PolicyProvider provider) {
  serviceAuthorizationManager.refreshWithLoadedConfiguration(conf, provider);
}
 
Example #27
Source File: MiniZKFCCluster.java    From big-c with Apache License 2.0
@Override
protected PolicyProvider getPolicyProvider() {
  return null;
}
 
Example #28
Source File: DAGClientServer.java    From incubator-tez with Apache License 2.0
private void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
  this.server.refreshServiceAcl(configuration, policyProvider);
}
 
Example #29
Source File: StreamingContainerParent.java    From attic-apex-core with Apache License 2.0
void refreshServiceAcls(Configuration configuration,
    PolicyProvider policyProvider)
{
  this.server.refreshServiceAcl(configuration, policyProvider);
}
 
Example #30
Source File: DataNode.java    From RDFS with Apache License 2.0
private void initConfig(Configuration conf) throws IOException {
  if (conf.get("slave.host.name") != null) {
    machineName = conf.get("slave.host.name");   
  }
  if (machineName == null) {
    machineName = DNS.getDefaultHost(
                                   conf.get("dfs.datanode.dns.interface","default"),
                                   conf.get("dfs.datanode.dns.nameserver","default"));
  }
  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
    "dfs.datanode.artificialBlockReceivedDelay", 0);
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);
  
  /* Based on results on different platforms, we might need set the default 
   * to false on some of them. */
  this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
                                           true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean("dfs.datanode.read.ignore.checksum",
      false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
  
  this.deletedReportInterval =
    conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;
  
  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than "
        + "dfs.blockreport.intervalMsec."
        + " Setting initial delay to 0 msec:");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);
  
  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}