org.apache.hadoop.fs.CommonConfigurationKeys Java Examples

The following examples show how to use org.apache.hadoop.fs.CommonConfigurationKeys. Each example is drawn from an open source project; the source file and originating project are noted above the code.
Example #1
Source File: TestSaslRPC.java    From hadoop with Apache License 2.0
@Test
public void testPingInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
  newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
      CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

  // set doPing to true
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
  ConnectionId remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
      remoteId.getPingInterval());
  // set doPing to false
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
  remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(0, remoteId.getPingInterval());
}
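The two keys exercised here work together: ipc.client.ping turns the periodic ping on or off, and ipc.ping.interval sets its period (60000 ms by default). A minimal sketch of reading them the same way ConnectionId does, assuming only hadoop-common on the classpath (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class PingIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    boolean doPing = conf.getBoolean(
        CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
        CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT);
    int interval = conf.getInt(
        CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
        CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
    // Mirrors the assertions above: a disabled ping reads as interval 0.
    System.out.println(doPing ? interval : 0);
  }
}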
 
Example #2
Source File: TestZlibCompressorDecompressor.java    From big-c with Apache License 2.0
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);

    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
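Setting io.native.lib.available to true only permits native loading; whether ZlibFactory actually uses native zlib still depends on libhadoop being present. A short sketch of the guard the test relies on:

Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
// True only when the flag is set and libhadoop's zlib bindings loaded.
boolean nativeZlib = ZlibFactory.isNativeZlibLoaded(conf);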
 
Example #3
Source File: TestRPC.java    From big-c with Apache License 2.0
@Test
public void testAuthorization() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);

  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);

  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
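In the test, ACL_CONFIG is a test-local key; real deployments set per-protocol keys defined in hadoop-policy.xml. A hedged sketch using the standard client-protocol key (the user and group names are made up):

Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
// ACL syntax: comma-separated users, one space, comma-separated groups.
// "*" alone grants access to everyone.
conf.set("security.client.protocol.acl", "alice,bob admins");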
 
Example #4
Source File: TestPipelinedSorter.java    From tez with Apache License 2.0
public static Configuration getConf() {
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "file:///");
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  //To enable PipelinedSorter
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_SORTER_CLASS, SorterImpl.PIPELINED.name());

  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, HashPartitioner.class.getName());

  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, true);

  //Setup localdirs
  if (workDir != null) {
    String localDirs = workDir.toString();
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs);
  }
  return conf;
}
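The "077" value is an octal umask, so files created under this configuration keep owner bits only. A sketch of reading it back through FsPermission, assuming hadoop-common:

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
// 0666 & ~0077 = 0600: new files are readable/writable by the owner only.
FsPermission umask = FsPermission.getUMask(conf);
System.out.println(umask);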
 
Example #5
Source File: TestGroupsCaching.java    From big-c with Apache License 2.0
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(
    CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();

  // We make an entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();

  timer.advance(20 * 1000);

  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
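The cache timeout is given in seconds, and the FakeTimer lets the test jump past it without sleeping. Outside tests, the mapping service is usually obtained through the shared singleton; a fragment (assumes a surrounding method that throws IOException):

Configuration conf = new Configuration();
// Cache resolved groups for five minutes (the key's unit is seconds).
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 300);
Groups groups = Groups.getUserToGroupsMappingService(conf);
List<String> myGroups = groups.getGroups(System.getProperty("user.name"));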
 
Example #6
Source File: TestCodec.java    From big-c with Apache License 2.0
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
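The final assertion guards against the pool handing a DefaultCodec compressor back out for GzipCodec. Borrowed compressors should always go back to the pool in a finally block; a fragment (rawOut stands in for any OutputStream):

DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);
Compressor compressor = CodecPool.getCompressor(codec);
try {
  CompressionOutputStream compressed = codec.createOutputStream(rawOut, compressor);
  // ... write through `compressed`, then finish() or close() it ...
} finally {
  CodecPool.returnCompressor(compressor); // lets other callers reuse it
}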
 
Example #7
Source File: TestServiceAuthorization.java    From big-c with Apache License 2.0
@Test
public void testDefaultAcl() {
  ServiceAuthorizationManager serviceAuthorizationManager = 
      new ServiceAuthorizationManager();
  Configuration conf = new Configuration();
  // test without setting a default acl
  conf.set(ACL_CONFIG, "user1 group1");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  AccessControlList acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());

  // test with a default acl
  conf.set(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL, 
      "user2 group2");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals("user2 group2", acl.getAclString());
}
 
Example #8
Source File: TestGroupFallback.java    From big-c with Apache License 2.0
@Test
public void testGroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");

  Groups groups = new Groups(conf);

  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);

  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
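JniBasedUnixGroupsMappingWithFallback prefers the native JNI resolver and falls back to shelling out when libhadoop is missing. To pin the pure-Java path explicitly, a sketch:

Configuration conf = new Configuration();
// Resolves groups by shelling out (no libhadoop.so required).
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
    "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
Groups groups = new Groups(conf);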
 
Example #9
Source File: NfsExports.java    From hadoop with Apache License 2.0
public static synchronized NfsExports getInstance(Configuration conf) {
  if (exports == null) {
    String matchHosts = conf.get(
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
    int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
    long expirationPeriodNano = conf.getLong(
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
    try {
      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
    } catch (IllegalArgumentException e) {
      LOG.error("Invalid NFS Exports provided: ", e);
      return exports;
    }
  }
  return exports;
}
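nfs.exports.allowed.hosts takes semicolon-separated "<host-or-network> <rw|ro>" entries, and the default exports everything read-write. A hedged configuration sketch (the addresses are illustrative):

Configuration conf = new Configuration();
// One writable subnet plus a read-only wildcard host pattern.
conf.set(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
    "192.168.0.0/22 rw ; host*.example.com ro");
NfsExports exports = NfsExports.getInstance(conf);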
 
Example #10
Source File: TestHttpServer.java    From big-c with Apache License 2.0
@Test
public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf = new Configuration();
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);

  // hadoop.security.instrumentation.requires.admin is false by default, so access is allowed
  Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));

  // now require admin access; the mocked ACL below rejects the user
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
}
 
Example #11
Source File: DFSAdmin.java    From big-c with Apache License 2.0
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For a datanode proxy, the expected server principal is the datanode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Example #12
Source File: TestSwitchMapping.java    From hadoop with Apache License 2.0
/**
 * Verify the cached mapper delegates the switch mapping query to the inner
 * mapping, which again handles arbitrary DNSToSwitchMapping implementations.
 *
 * @throws Throwable on any problem
 */
@Test
public void testCachingRelaysStringOperations() throws Throwable {
  Configuration conf = new Configuration();
  String scriptname = "mappingscript.sh";
  conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
           scriptname);
  ScriptBasedMapping scriptMapping = new ScriptBasedMapping(conf);
  assertTrue("Did not find " + scriptname + " in " + scriptMapping,
             scriptMapping.toString().contains(scriptname));
  CachedDNSToSwitchMapping mapping =
      new CachedDNSToSwitchMapping(scriptMapping);
  assertTrue("Did not find " + scriptname + " in " + mapping,
             mapping.toString().contains(scriptname));
}
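net.topology.script.file.name names an executable that maps host names or IPs to rack paths on stdout. A sketch of resolving through the script mapping (the script path is hypothetical):

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
    "/etc/hadoop/topology.sh"); // hypothetical script printing e.g. /rack1
DNSToSwitchMapping mapping = new ScriptBasedMapping(conf);
List<String> racks = mapping.resolve(Arrays.asList("10.1.2.3"));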
 
Example #13
Source File: TestInjectionForSimulatedStorage.java    From hadoop with Apache License 2.0
private void writeFile(FileSystem fileSys, Path name, int repl)
                                              throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[filesize];
  for (int i=0; i<buffer.length; i++) {
    buffer[i] = '1';
  }
  stm.write(buffer);
  stm.close();
}
 
Example #14
Source File: SnappyCodec.java    From hadoop with Apache License 2.0
/**
 * Create a {@link CompressionOutputStream} that will write to the given
 * {@link OutputStream} with the given {@link Compressor}.
 *
 * @param out        the location for the final output stream
 * @param compressor compressor to use
 * @return a stream the user can write uncompressed data to have it compressed
 * @throws IOException
 */
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
                                                  Compressor compressor)
    throws IOException {
  checkNativeCodeLoaded();
  int bufferSize = conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);

  int compressionOverhead = (bufferSize / 6) + 32;

  return new BlockCompressorStream(out, compressor, bufferSize,
      compressionOverhead);
}
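The overhead term reserves room for a block that does not compress at all: with the default 256 KB buffer (io.compression.codec.snappy.buffersize), it works out to (262144 / 6) + 32 = 43722 bytes. The arithmetic, spelled out:

int bufferSize = 256 * 1024;  // IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT
int compressionOverhead = (bufferSize / 6) + 32;  // 43690 + 32 = 43722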
 
Example #15
Source File: TestRPC.java    From hadoop with Apache License 2.0
/**
 * Switch off setting socketTimeout values on RPC sockets.
 * Verify that RPC calls still work ok.
 */
public void testNoPings() throws IOException {
  Configuration conf = new Configuration();
  
  conf.setBoolean("ipc.client.ping", false);
  new TestRPC().testCallsInternal(conf);
  
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  new TestRPC().testCallsInternal(conf);
}
 
Example #16
Source File: CompressionCodecFactory.java    From big-c with Apache License 2.0
/**
 * Get the list of codecs discovered via a Java ServiceLoader, or
 * listed in the configuration. Codecs specified in configuration come
 * later in the returned list, and are considered to override those
 * from the ServiceLoader.
 * @param conf the configuration to look in
 * @return a list of the {@link CompressionCodec} classes
 */
public static List<Class<? extends CompressionCodec>> getCodecClasses(
    Configuration conf) {
  List<Class<? extends CompressionCodec>> result
    = new ArrayList<Class<? extends CompressionCodec>>();
  // Add codec classes discovered via service loading
  synchronized (CODEC_PROVIDERS) {
    // CODEC_PROVIDERS is a lazy collection. Synchronize so it is
    // thread-safe. See HADOOP-8406.
    for (CompressionCodec codec : CODEC_PROVIDERS) {
      result.add(codec.getClass());
    }
  }
  // Add codec classes from configuration
  String codecsString = conf.get(
      CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY);
  if (codecsString != null) {
    StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
    while (codecSplit.hasMoreElements()) {
      String codecSubstring = codecSplit.nextToken().trim();
      if (codecSubstring.length() != 0) {
        try {
          Class<?> cls = conf.getClassByName(codecSubstring);
          if (!CompressionCodec.class.isAssignableFrom(cls)) {
            throw new IllegalArgumentException("Class " + codecSubstring +
                                               " is not a CompressionCodec");
          }
          result.add(cls.asSubclass(CompressionCodec.class));
        } catch (ClassNotFoundException ex) {
          throw new IllegalArgumentException("Compression codec " + 
                                             codecSubstring + " not found.",
                                             ex);
        }
      }
    }
  }
  return result;
}
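Because configured codecs come after the ServiceLoader results, a codec listed in io.compression.codecs overrides a discovered codec that claims the same file extension. A sketch of registering codecs explicitly:

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY,
    "org.apache.hadoop.io.compress.GzipCodec,"
    + "org.apache.hadoop.io.compress.DefaultCodec");
List<Class<? extends CompressionCodec>> codecs =
    CompressionCodecFactory.getCodecClasses(conf);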
 
Example #17
Source File: TestFailoverController.java    From big-c with Apache License 2.0
@Test
public void testFailoverFromNonExistantServiceWithFencer() throws Exception {
  DummyHAService svc1 = spy(new DummyHAService(null, svc1Addr));
  // Getting a proxy to a dead server will throw IOException on call,
  // not on creation of the proxy.
  HAServiceProtocol errorThrowingProxy = Mockito.mock(HAServiceProtocol.class,
      Mockito.withSettings()
        .defaultAnswer(new ThrowsException(
            new IOException("Could not connect to host")))
        .extraInterfaces(Closeable.class));
  Mockito.doNothing().when((Closeable)errorThrowingProxy).close();

  Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(
      Mockito.<Configuration>any(),
      Mockito.anyInt());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
  } catch (FailoverFailedException ffe) {
    fail("Non-existant active prevented failover");
  }
  // Verify that the proxy created to try to make it go to standby
  // gracefully used the right rpc timeout
  Mockito.verify(svc1).getProxy(
      Mockito.<Configuration>any(),
      Mockito.eq(
        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
      
  // Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}
 
Example #18
Source File: TestCodec.java    From big-c with Apache License 2.0
@Test
public void testGzipCodecRead() throws IOException {
  // Create a gzipped file and try to read it back, using a decompressor
  // from the CodecPool.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));

  // Ensure that the CodecPool has a BuiltInZlibInflater in it.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);

  // Now create a GZip text file.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
    new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final String msg = "This is the message in the file!";
  bw.write(msg);
  bw.close();

  // Now read it back, using the CodecPool to establish the
  // decompressor to use.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
}
 
Example #19
Source File: TestStartup.java    From hadoop with Apache License 2.0
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Example #20
Source File: TestSetTimes.java    From big-c with Apache License 2.0
private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  return stm;
}
 
Example #21
Source File: TestFcHdfsSetUMask.java    From hadoop with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining()
      throws IOException, LoginException, URISyntaxException  {
  Configuration conf = new HdfsConfiguration();
  // set permissions very restrictive
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,  "077");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
 
Example #22
Source File: TestGridmixSummary.java    From hadoop with Apache License 2.0
/**
 * Test {@link ClusterSummarizer}.
 */
@Test  (timeout=20000)
public void testClusterSummarizer() throws IOException {
  ClusterSummarizer cs = new ClusterSummarizer();
  Configuration conf = new Configuration();
  
  String jt = "test-jt:1234";
  String nn = "test-nn:5678";
  conf.set(JTConfig.JT_IPC_ADDRESS, jt);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, nn);
  cs.start(conf);
  
  assertEquals("JT name mismatch", jt, cs.getJobTrackerInfo());
  assertEquals("NN name mismatch", nn, cs.getNamenodeInfo());
  
  ClusterStats cStats = ClusterStats.getClusterStats();
  conf.set(JTConfig.JT_IPC_ADDRESS, "local");
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "local");
  JobClient jc = new JobClient(conf);
  cStats.setClusterMetric(jc.getClusterStatus());
  
  cs.update(cStats);
  
  // test
  assertEquals("Cluster summary test failed!", 1, cs.getMaxMapTasks());
  assertEquals("Cluster summary test failed!", 1, cs.getMaxReduceTasks());
  assertEquals("Cluster summary test failed!", 1, cs.getNumActiveTrackers());
  assertEquals("Cluster summary test failed!", 0, 
               cs.getNumBlacklistedTrackers());
}
 
Example #23
Source File: TestUnorderedKVEdgeConfig.java    From tez with Apache License 2.0
@Test (timeout=2000)
public void testDefaultConfigsUsed() {
  UnorderedKVEdgeConfig.Builder builder =
      UnorderedKVEdgeConfig.newBuilder("KEY", "VALUE");
  builder.setKeySerializationClass("SerClass1", null).setValueSerializationClass("SerClass2", null);

  UnorderedKVEdgeConfig configuration = builder.build();


  UnorderedKVOutputConfig rebuiltOutput =
      new UnorderedKVOutputConfig();
  rebuiltOutput.fromUserPayload(configuration.getOutputPayload());
  UnorderedKVInputConfig rebuiltInput =
      new UnorderedKVInputConfig();
  rebuiltInput.fromUserPayload(configuration.getInputPayload());

  Configuration outputConf = rebuiltOutput.conf;
  assertEquals(true, outputConf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_IFILE_READAHEAD,
      TezRuntimeConfiguration.TEZ_RUNTIME_IFILE_READAHEAD_DEFAULT));
  assertEquals("TestCodec",
      outputConf.get(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS_CODEC, ""));
  assertTrue(outputConf.get(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY).startsWith
      ("SerClass2,SerClass1"));

  Configuration inputConf = rebuiltInput.conf;
  assertEquals(true, inputConf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_IFILE_READAHEAD,
      TezRuntimeConfiguration.TEZ_RUNTIME_IFILE_READAHEAD_DEFAULT));
  assertEquals("TestCodec",
      inputConf.get(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS_CODEC, ""));
  assertTrue(inputConf.get(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY).startsWith
      ("SerClass2,SerClass1"));
}
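The builder prepends the custom serialization classes, which is why the assertions check that io.serializations starts with them. Registering a custom Serialization directly looks like this sketch (com.example.MySerialization is hypothetical):

Configuration conf = new Configuration();
// Ordered list; Hadoop uses the first Serialization that accepts a class.
conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
    "com.example.MySerialization",  // hypothetical custom Serialization
    "org.apache.hadoop.io.serializer.WritableSerialization");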
 
Example #24
Source File: Client.java    From big-c with Apache License 2.0
ConnectionId(InetSocketAddress address, Class<?> protocol, 
             UserGroupInformation ticket, int rpcTimeout,
             RetryPolicy connectionRetryPolicy, Configuration conf) {
  this.protocol = protocol;
  this.address = address;
  this.ticket = ticket;
  this.rpcTimeout = rpcTimeout;
  this.connectionRetryPolicy = connectionRetryPolicy;

  this.maxIdleTime = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
  this.maxRetriesOnSasl = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
  this.maxRetriesOnSocketTimeouts = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.tcpNoDelay = conf.getBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT);
  this.doPing = conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
      CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT);
  this.pingInterval = (doPing ? Client.getPingInterval(conf) : 0);
  this.conf = conf;
}
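Everything read in this constructor is a plain client-side IPC setting, so retuning it is a configuration-only change. A sketch of the most commonly adjusted knobs:

Configuration conf = new Configuration();
// Close idle connections after 10 seconds.
conf.setInt(
    CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 10000);
// Trade a little bandwidth for lower latency on small RPCs.
conf.setBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, true);
// Disabling pings forces pingInterval to 0, as the last line above shows.
conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);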
 
Example #25
Source File: TestViewFileSystemHdfs.java    From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  
  cluster =
      new MiniDFSCluster.Builder(CONF).nnTopology(
              MiniDFSNNTopology.simpleFederatedTopology(2))
          .numDataNodes(2)
          .build();
  cluster.waitClusterUp();
  
  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
  fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());
  fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());

  defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  
  fHdfs.mkdirs(defaultWorkingDirectory);
  fHdfs2.mkdirs(defaultWorkingDirectory2);
}
 
Example #26
Source File: TestReplication.java    From big-c with Apache License 2.0
private void writeFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Example #27
Source File: TestNameEditsConfigs.java    From big-c with Apache License 2.0
private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, BLOCK_SIZE);
  byte[] buffer = new byte[FILE_SIZE];
  Random rand = new Random(SEED);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Example #28
Source File: TestHDFSEventSink.java    From mt-flume with Apache License 2.0
@Test
public void testKerbFileAccess() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {
  LOG.debug("Starting testKerbFileAccess() ...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  String kerbConfPrincipal = "user1/[email protected]";
  String kerbKeytab = "/usr/lib/flume/nonexistkeytabfile";

  //turn security on
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.kerberosPrincipal", kerbConfPrincipal);
  context.put("hdfs.kerberosKeytab", kerbKeytab);

  try {
    Configurables.configure(sink, context);
    Assert.fail("no exception thrown");
  } catch (IllegalArgumentException expected) {
    Assert.assertTrue(expected.getMessage().contains(
        "is nonexistent or can't read."));
  } finally {
    //turn security off
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "simple");
    UserGroupInformation.setConfiguration(conf);
  }
}
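The failure being tested comes from pointing the sink at a keytab that does not exist. The working shape of the same setup is a two-step sketch (the principal and keytab path are hypothetical):

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
// Then log in with a principal/keytab pair that exists on disk.
UserGroupInformation.loginUserFromKeytab(
    "user1/[email protected]",              // hypothetical principal
    "/etc/security/keytabs/user1.keytab");  // hypothetical path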
 
Example #29
Source File: TestDistCpWithAcls.java    From big-c with Apache License 2.0
/**
 * Initialize the cluster, wait for it to become active, and get FileSystem.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param aclsEnabled if true, ACL support is enabled
 * @throws Exception if any step fails
 */
private static void initCluster(boolean format, boolean aclsEnabled)
    throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
    .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
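The stubfs wiring works because FileSystem resolves a URI scheme through fs.<scheme>.impl before falling back to ServiceLoader discovery. The same pattern applies to any custom scheme; a sketch reusing the test's StubFileSystem:

Configuration conf = new Configuration();
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
FileSystem fs = FileSystem.get(conf);  // resolves to StubFileSystem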