Java Code Examples for org.apache.hadoop.fs.CommonConfigurationKeys

The following examples show how to use org.apache.hadoop.fs.CommonConfigurationKeys, the class (extending CommonConfigurationKeysPublic) that collects Hadoop Common's configuration key names and defaults as paired *_KEY / *_DEFAULT constants. The examples are extracted from open source projects; the source project and file are noted above each one.
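
As a minimal sketch of the pattern that recurs throughout the examples below (illustrative only, not taken from any of the listed projects), values are read and written through a Configuration object:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class CommonConfigurationKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Read a value, falling back to the paired *_DEFAULT constant.
    int pingInterval = conf.getInt(
        CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
        CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

    // Write a value using the constant instead of a raw string key.
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);

    System.out.println("ping interval: " + pingInterval);
  }
}
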
Example 1
Source Project: big-c   Source File: DFSAdmin.java    License: Apache License 2.0
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the datanode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Example 2
Source Project: hadoop   Source File: TestSaslRPC.java    License: Apache License 2.0
@Test
public void testPingInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
  newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
      CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

  // set doPing to true
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
  ConnectionId remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
      remoteId.getPingInterval());
  // set doPing to false
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
  remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(0, remoteId.getPingInterval());
}
 
Example 3
Source Project: big-c   Source File: TestServiceAuthorization.java    License: Apache License 2.0
@Test
public void testDefaultAcl() {
  ServiceAuthorizationManager serviceAuthorizationManager = 
      new ServiceAuthorizationManager();
  Configuration conf = new Configuration();
  // test without setting a default acl
  conf.set(ACL_CONFIG, "user1 group1");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  AccessControlList acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());

  // test with a default acl
  conf.set(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL, 
      "user2 group2");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals("user2 group2", acl.getAclString());
}
 
Example 4
Source Project: big-c   Source File: TestCodec.java    License: Apache License 2.0
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
 
Example 5
Source Project: big-c   Source File: TestRPC.java    License: Apache License 2.0
@Test
public void testAuthorization() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);

  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);

  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
 
Example 6
Source Project: big-c   Source File: TestGroupsCaching.java    License: Apache License 2.0
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(
    CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();

  // Make an initial request to create a cache entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();

  timer.advance(20 * 1000);

  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
 
Example 7
Source Project: big-c   Source File: TestGroupFallback.java    License: Apache License 2.0
@Test
public void testGroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");

  Groups groups = new Groups(conf);

  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);

  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
 
Example 8
Source Project: hadoop   Source File: TestZlibCompressorDecompressor.java    License: Apache License 2.0
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);

    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Example 9
Source Project: tez   Source File: TestPipelinedSorter.java    License: Apache License 2.0
public static Configuration getConf() {
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "file:///");
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  //To enable PipelinedSorter
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_SORTER_CLASS, SorterImpl.PIPELINED.name());

  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, HashPartitioner.class.getName());

  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, true);

  //Setup localdirs
  if (workDir != null) {
    String localDirs = workDir.toString();
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs);
  }
  return conf;
}
 
Example 10
Source Project: hadoop   Source File: NfsExports.java    License: Apache License 2.0
public static synchronized NfsExports getInstance(Configuration conf) {
  if (exports == null) {
    String matchHosts = conf.get(
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
    int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
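    // The expiry time is configured in milliseconds; convert it to nanoseconds.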
    long expirationPeriodNano = conf.getLong(
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
    try {
      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
    } catch (IllegalArgumentException e) {
      LOG.error("Invalid NFS Exports provided: ", e);
      return exports;
    }
  }
  return exports;
}
 
Example 11
Source Project: big-c   Source File: TestHttpServer.java    License: Apache License 2.0
@Test
public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf = new Configuration();
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);

  //requires admin access to instrumentation, FALSE by default
  Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));

  //requires admin access to instrumentation, TRUE
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
}
 
Example 12
Source Project: hadoop   Source File: TestFileCreation.java    License: Apache License 2.0
static FSDataOutputStream createNonRecursive(FileSystem fs, Path name,
    int repl, EnumSet<CreateFlag> flag) throws IOException {
  System.out.println("createNonRecursive: Created " + name + " with " + repl
      + " replica.");
  FSDataOutputStream stm = ((DistributedFileSystem) fs).createNonRecursive(
      name, FsPermission.getDefault(), flag, fs.getConf().getInt(
          CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl,  blockSize, null);
  return stm;
}
 
Example 13
Source Project: big-c   Source File: RMHAUtils.java    License: Apache License 2.0
private static HAServiceState getHAState(YarnConfiguration yarnConf)
    throws Exception {
  HAServiceTarget haServiceTarget;
  int rpcTimeoutForChecks =
      yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
          CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);

  yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
  haServiceTarget = new RMHAServiceTarget(yarnConf);
  HAServiceProtocol proto =
      haServiceTarget.getProxy(yarnConf, rpcTimeoutForChecks);
  HAServiceState haState = proto.getServiceStatus().getState();
  return haState;
}
 
Example 14
Source Project: hadoop-ozone   Source File: OMBucketCreateRequest.java    License: Apache License 2.0
private BucketEncryptionInfoProto getBeinfo(
    KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo)
    throws IOException {
  BucketEncryptionInfoProto bek = bucketInfo.getBeinfo();
  BucketEncryptionInfoProto.Builder bekb = null;
  if (kmsProvider == null) {
    throw new OMException("Invalid KMS provider, check configuration " +
        CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        OMException.ResultCodes.INVALID_KMS_PROVIDER);
  }
  if (bek.getKeyName() == null) {
    throw new OMException("Bucket encryption key needed.", OMException
        .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
  }
  // Talk to KMS to retrieve the bucket encryption key info.
  KeyProvider.Metadata metadata = kmsProvider.getMetadata(
      bek.getKeyName());
  if (metadata == null) {
    throw new OMException("Bucket encryption key " + bek.getKeyName()
        + " doesn't exist.",
        OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
  }
  // If the provider supports pool for EDEKs, this will fill in the pool
  kmsProvider.warmUpEncryptedKeys(bek.getKeyName());
  bekb = BucketEncryptionInfoProto.newBuilder()
      .setKeyName(bek.getKeyName())
      .setCryptoProtocolVersion(ENCRYPTION_ZONES)
      .setSuite(OMPBHelper.convert(
          CipherSuite.convert(metadata.getCipher())));
  return bekb.build();
}
 
Example 15
Source Project: big-c   Source File: HttpServer2.java    License: Apache License 2.0
/**
 * Does the user sending the HttpServletRequest have administrator ACLs? If
 * not, the response will be modified to send an error to the user.
 *
 * @param response used to send the error response if user does not have admin access.
 * @return true if admin-authorized, false otherwise
 * @throws IOException
 */
public static boolean hasAdministratorAccess(
    ServletContext servletContext, HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  Configuration conf =
      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  // If there is no authorization, anybody has administrator access.
  if (!conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    return true;
  }

  String remoteUser = request.getRemoteUser();
  if (remoteUser == null) {
    response.sendError(HttpServletResponse.SC_FORBIDDEN,
                       "Unauthenticated users are not " +
                       "authorized to access this page.");
    return false;
  }

  if (servletContext.getAttribute(ADMINS_ACL) != null &&
      !userHasAdministratorAccess(servletContext, remoteUser)) {
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
        + remoteUser + " is unauthorized to access this page.");
    return false;
  }

  return true;
}
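
A short, hedged sketch of how a servlet might guard a handler with this check; AdminOnlyServlet is a hypothetical class, not part of the project:

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.http.HttpServer2;

public class AdminOnlyServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // On failure, hasAdministratorAccess has already sent the error response.
    if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
        response)) {
      return;
    }
    response.getWriter().println("admin-only content");
  }
}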
 
Example 16
Source Project: big-c   Source File: HttpServer2.java    License: Apache License 2.0
/**
 * Add default apps.
 * @param appDir The application directory
 * @throws IOException
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir, Configuration conf) throws IOException {
  // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
  String logDir = System.getProperty("hadoop.log.dir");
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(AdminAuthorizedServlet.class, "/*");
    if (conf.getBoolean(
        CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
        CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
      @SuppressWarnings("unchecked")
      Map<String, String> params = logContext.getInitParams();
      params.put(
          "org.mortbay.jetty.servlet.Default.aliases", "true");
    }
    logContext.setDisplayName("logs");
    setContextAttributes(logContext, conf);
    addNoCacheFilter(webAppContext);
    defaultContexts.put(logContext, true);
  }
  // set up the context for "/static/*"
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(DefaultServlet.class, "/*");
  staticContext.setDisplayName("static");
  setContextAttributes(staticContext, conf);
  defaultContexts.put(staticContext, true);
}
 
Example 17
Source Project: Bats   Source File: SpnegoConfig.java    License: Apache License 2.0
private UserGroupInformation loginAndReturnUgi() throws DrillException {
  validateSpnegoConfig();

  UserGroupInformation ugi;
  try {
    // If security is not enabled, set the security configuration so the
    // principal can log in. After the login is performed, reset the static
    // UGI state.
    if (!UserGroupInformation.isSecurityEnabled()) {
      final Configuration newConfig = new Configuration();
      newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
          UserGroupInformation.AuthenticationMethod.KERBEROS.toString());

      if (clientNameMapping != null) {
        newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTH_TO_LOCAL, clientNameMapping);
      }

      UserGroupInformation.setConfiguration(newConfig);
      ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);

      // Reset the original configuration for the static UGI.
      UserGroupInformation.setConfiguration(new Configuration());
    } else {
      // Do not overwrite the rules here, since CUSTOM security may be configured
      // for JDBC/ODBC with default rules. If Kerberos is enabled, the correct
      // rules must already be set.
      ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
    }
  } catch (Exception e) {
    throw new DrillException(String.format("Login failed for %s with given keytab", principal), e);
  }
  return ugi;
}
 
Example 18
Source Project: hadoop   Source File: Lz4Codec.java    License: Apache License 2.0
/**
 * Create a {@link CompressionInputStream} that will read from the given
 * {@link InputStream} with the given {@link Decompressor}.
 *
 * @param in           the stream to read compressed bytes from
 * @param decompressor decompressor to use
 * @return a stream to read uncompressed bytes from
 * @throws IOException
 */
@Override
public CompressionInputStream createInputStream(InputStream in,
                                                Decompressor decompressor)
    throws IOException {
  if (!isNativeCodeLoaded()) {
    throw new RuntimeException("native lz4 library not available");
  }

  return new BlockDecompressorStream(in, decompressor, conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
}
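
As a hedged usage sketch (not from the hadoop source), callers typically obtain the codec through CompressionCodecFactory, which resolves Lz4Codec from the .lz4 file extension when the codec is registered in io.compression.codecs; the resulting stream then picks up the buffer-size key shown above:

import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class Lz4ReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path(args[0]); // e.g. a file ending in .lz4
    FileSystem fs = path.getFileSystem(conf);

    // Resolves the codec from the file extension; null if none matches.
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
    if (codec == null) {
      throw new IllegalArgumentException("No codec found for " + path);
    }

    // createInputStream applies the lz4 buffer-size configuration shown above.
    try (InputStream in = codec.createInputStream(fs.open(path))) {
      System.out.println("first byte: " + in.read());
    }
  }
}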
 
Example 19
Source Project: hadoop   Source File: TestGroupsCaching.java    License: Apache License 2.0
@Before
public void setup() {
  FakeGroupMapping.resetRequestCount();
  ExceptionalGroupMapping.resetRequestCount();

  conf = new Configuration();
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
    FakeGroupMapping.class,
    ShellBasedUnixGroupsMapping.class);
}
 
Example 20
Source Project: hadoop   Source File: TestFSInputChecker.java    License: Apache License 2.0
private void testSeekAndRead(FileSystem fileSys)
throws IOException {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);
  stm = fileSys.open(
      file,
      fileSys.getConf().getInt(
          CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096));
  checkSeekAndRead();
  stm.close();
  cleanupFile(fileSys, file);
}
 
Example 21
Source Project: big-c   Source File: HAAdmin.java    License: Apache License 2.0
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf != null) {
    rpcTimeoutForChecks = conf.getInt(
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
  }
}
 
Example 22
Source Project: big-c   Source File: TestUGILoginFromKeytab.java    License: Apache License 2.0
@Before
public void startMiniKdc() throws Exception {
  // The setting below is required. If it is not enabled, UGI will abort
  // any attempt to loginUserFromKeytab.
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  workDir = folder.getRoot();
  kdc = new MiniKdc(MiniKdc.createConf(), workDir);
  kdc.start();
}
 
Example 23
@Override
public MiniRaftClusterWithHadoopRpc newCluster(int numPeers) {
  final Configuration conf = new Configuration();
  HadoopConfigKeys.Ipc.setHandlers(conf, 20);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY, 1000);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY, 1000);
  return MiniRaftClusterWithHadoopRpc.FACTORY.newCluster(
      numPeers, getProperties(), conf);
}
 
Example 24
Source Project: big-c   Source File: TestActiveStandbyElector.java    License: Apache License 2.0
ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
    List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException,
    KeeperException {
  super(hostPort, timeout, parent, acl, Collections
      .<ZKAuthInfo> emptyList(), app,
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
}
 
Example 25
Source Project: hadoop   Source File: Lz4Codec.java    License: Apache License 2.0
/**
 * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
 *
 * @return a new compressor for use by this codec
 */
@Override
public Compressor createCompressor() {
  if (!isNativeCodeLoaded()) {
    throw new RuntimeException("native lz4 library not available");
  }
  int bufferSize = conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
  boolean useLz4HC = conf.getBoolean(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
  return new Lz4Compressor(bufferSize, useLz4HC);
}
 
Example 26
Source Project: big-c   Source File: JournalNodeRpcServer.java    License: Apache License 2.0
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;
  
  Configuration confCopy = new Configuration(conf);
  
  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);
  
  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);
  
  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
 
Example 27
Source Project: hadoop   Source File: TestActiveStandbyElectorRealZK.java    License: Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();
  
  zkServer = getServer(serverFactory);

  for (int i = 0; i < NUM_ELECTORS; i++) {
    cbs[i] =  Mockito.mock(ActiveStandbyElectorCallback.class);
    appDatas[i] = Ints.toByteArray(i);
    electors[i] = new ActiveStandbyElector(hostPort, 5000, PARENT_DIR,
        Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo> emptyList(), cbs[i],
        CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
  }
}
 
Example 28
Source Project: big-c   Source File: TestCodec.java    License: Apache License 2.0
@Test
public void testCodecPoolCompressorReinit() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
    gzipReinitTest(conf, gzc);
  } else {
    LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded");
  }
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  gzipReinitTest(conf, dfc);
}
 
Example 29
Source Project: big-c   Source File: HttpServer.java    License: Apache License 2.0
/**
 * Does the user sending the HttpServletRequest have administrator ACLs? If
 * not, the response will be modified to send an error to the user.
 *
 * @param servletContext the context holding the configuration and admin ACLs
 * @param request the request whose remote user is checked
 * @param response used to send the error response if user does not have admin access.
 * @return true if admin-authorized, false otherwise
 * @throws IOException
 */
public static boolean hasAdministratorAccess(
    ServletContext servletContext, HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  Configuration conf =
      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  // If there is no authorization, anybody has administrator access.
  if (!conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    return true;
  }

  String remoteUser = request.getRemoteUser();
  if (remoteUser == null) {
    response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
                       "Unauthenticated users are not " +
                       "authorized to access this page.");
    return false;
  }
  
  if (servletContext.getAttribute(ADMINS_ACL) != null &&
      !userHasAdministratorAccess(servletContext, remoteUser)) {
    response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
        + remoteUser + " is unauthorized to access this page.");
    return false;
  }

  return true;
}