Java Code Examples for org.apache.hadoop.test.GenericTestUtils#setLogLevel()

The following examples show how to use org.apache.hadoop.test.GenericTestUtils#setLogLevel(). Each example is taken from an open-source project; the project and license are noted above the code.
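Before the examples, a quick orientation: the recurring pattern is to raise a logger to a more verbose level for the duration of a test and, where the test asserts on log output, to pair setLogLevel() with GenericTestUtils.LogCapturer. The snippet below is a minimal sketch of that shape only and is not taken from any project: MyService, its public static LOG field, and doSomething() are placeholder assumptions, and the Level class to import depends on which setLogLevel() overload you call (Examples 8 and 12 pass a log4j Logger from LogManager, while Example 20 passes an slf4j Logger from LoggerFactory).

@Test
public void testLogsExpectedDebugMessage() {
  // Placeholder class under test; substitute the class whose logging you
  // want to inspect. Raise its logger to DEBUG, then capture its output.
  GenericTestUtils.setLogLevel(MyService.LOG, Level.DEBUG);
  GenericTestUtils.LogCapturer logs =
      GenericTestUtils.LogCapturer.captureLogs(MyService.LOG);

  MyService.doSomething();   // hypothetical call expected to log at DEBUG

  assertTrue(logs.getOutput().contains("expected debug message"));
}

Examples 1 to 3 show this same pattern against real Ozone classes.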
Example 1
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testdbTypeNotSet() throws IOException {

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
  GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-test");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
      .setCreateIfMissing(true).setDbFile(dbDir).build();
  assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" +
      " " + storeImpl));
  dbStore.close();
  dbStore.destroy();
  FileUtils.deleteDirectory(dbDir);

}
 
Example 2
Source File: TestSecureOzoneCluster.java    From hadoop-ozone with Apache License 2.0
/**
 * Tests that secure OM initialization succeeds.
 */
@Test
public void testSecureOmInitializationSuccess() throws Exception {
  initSCM();
  // Create a secure SCM instance as om client will connect to it
  scm = StorageContainerManager.createSCM(conf);
  LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
  GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);

  setupOm(conf);
  try {
    om.start();
  } catch (Exception ex) {
    // Expect a timeout failure from the SCM client inside OM, but the OM
    // user's Kerberos login should still succeed.
    assertTrue(logs.getOutput().contains("Ozone Manager login successful"));
  }
}
 
Example 3
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testMetaStoreConfigDifferentFromType() throws IOException {

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
  String dbType;
  GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
  if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
    dbType = "RocksDB";
  } else {
    dbType = "LevelDB";
  }

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + dbType.toLowerCase() + "-test");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
      .setCreateIfMissing(true).setDbFile(dbDir).setDBType(dbType).build();
  assertTrue(logCapturer.getOutput().contains("Using dbType " + dbType + "" +
      " for metastore"));
  dbStore.close();
  dbStore.destroy();
  FileUtils.deleteDirectory(dbDir);

}
 
Example 4
Source File: TestOzoneClientKeyGenerator.java    From hadoop-ozone with Apache License 2.0
/**
 * Creates the temporary test directory and enables DEBUG logging on the
 * Ratis RaftLog and RaftServerImpl loggers before each test.
 */
@Before
public void setup() {
  path = GenericTestUtils
      .getTempPath(TestOzoneClientKeyGenerator.class.getSimpleName());
  GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG);
  GenericTestUtils.setLogLevel(RaftServerImpl.LOG, Level.DEBUG);
  File baseDir = new File(path);
  baseDir.mkdirs();
}
 
Example 5
Source File: TestHadoopDirTreeGenerator.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() {
  path = GenericTestUtils
          .getTempPath(TestOzoneClientKeyGenerator.class.getSimpleName());
  GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG);
  GenericTestUtils.setLogLevel(RaftServerImpl.LOG, Level.DEBUG);
  File baseDir = new File(path);
  baseDir.mkdirs();
}
 
Example 6
Source File: TestBlockDeletion.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG);
  GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);

  String path =
      GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
      3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .setHbInterval(200)
      .build();
  cluster.waitForClusterToBeReady();
  store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
  om = cluster.getOzoneManager();
  scm = cluster.getStorageContainerManager();
  containerIdsWithDeletedBlocks = new HashSet<>();
}
 
Example 7
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
 
Example 8
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
static void setNameNodeLoggingLevel(Level logLevel) {
  LOG.fatal("Log level = " + logLevel.toString());
  // change log level to NameNode logs
  DFSTestUtil.setNameNodeLogLevel(logLevel);
  GenericTestUtils.setLogLevel(LogManager.getLogger(
          NetworkTopology.class.getName()), logLevel);
  GenericTestUtils.setLogLevel(LogManager.getLogger(
          Groups.class.getName()), logLevel);
}
 
Example 9
Source File: TestBlockScanner.java    From big-c with Apache License 2.0
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
 
Example 10
Source File: TestBlockScanner.java    From hadoop with Apache License 2.0
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
 
Example 11
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
 
Example 12
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
static void setNameNodeLoggingLevel(Level logLevel) {
  LOG.fatal("Log level = " + logLevel.toString());
  // change log level to NameNode logs
  DFSTestUtil.setNameNodeLogLevel(logLevel);
  GenericTestUtils.setLogLevel(LogManager.getLogger(
          NetworkTopology.class.getName()), logLevel);
  GenericTestUtils.setLogLevel(LogManager.getLogger(
          Groups.class.getName()), logLevel);
}
 
Example 13
Source File: TestLogLevel.java    From hbase with Apache License 2.0
/**
 * Run both the client and the server using the given protocols.
 *
 * @param bindProtocol specify either http or https for the server
 * @param connectProtocol specify either http or https for the client
 * @param isSpnego true if SPNEGO is enabled
 * @param newLevel the log level to set through the client
 * @throws Exception if the client can't access the server.
 */
private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol,
    final boolean isSpnego, final String newLevel)
    throws Exception {
  if (!LogLevel.isValidProtocol(bindProtocol)) {
    throw new Exception("Invalid server protocol " + bindProtocol);
  }
  if (!LogLevel.isValidProtocol(connectProtocol)) {
    throw new Exception("Invalid client protocol " + connectProtocol);
  }
  Level oldLevel = log.getEffectiveLevel();
  assertNotEquals("Get default Log Level which shouldn't be ERROR.",
      Level.ERROR, oldLevel);

  // configs needed for SPNEGO at server side
  if (isSpnego) {
    serverConf.set(PRINCIPAL, HTTP_PRINCIPAL);
    serverConf.set(KEYTAB, keyTabFile.getAbsolutePath());
    serverConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    serverConf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    UserGroupInformation.setConfiguration(serverConf);
  } else {
    serverConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
    serverConf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
    UserGroupInformation.setConfiguration(serverConf);
  }

  final HttpServer server = createServer(bindProtocol, isSpnego);
  // get server port
  final String authority = NetUtils.getHostPortString(server.getConnectorAddress(0));

  String keytabFilePath = keyTabFile.getAbsolutePath();

  UserGroupInformation clientUGI = UserGroupInformation.
      loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFilePath);
  try {
    clientUGI.doAs((PrivilegedExceptionAction<Void>) () -> {
      // client command line
      getLevel(connectProtocol, authority);
      setLevel(connectProtocol, authority, newLevel);
      return null;
    });
  } finally {
    clientUGI.logoutUserFromKeytab();
    server.stop();
  }

  // restore log level
  GenericTestUtils.setLogLevel(log, oldLevel);
}
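One detail in Example 13 worth carrying over: it records the logger's effective level before the test and puts it back with setLogLevel() at the end, so the level change does not leak into later tests. Below is a minimal sketch of that save-and-restore idiom, assuming log is an org.apache.log4j.Logger as in the example above; runTestBody() is a hypothetical placeholder, and wrapping the restore in a finally block is a small variation on the original.

Level oldLevel = log.getEffectiveLevel();
GenericTestUtils.setLogLevel(log, Level.ALL);
try {
  runTestBody();   // hypothetical code exercised at the verbose level
} finally {
  // Restore the saved level so later tests see the original configuration.
  GenericTestUtils.setLogLevel(log, oldLevel);
}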
 
Example 14
Source File: TestPipelines.java    From big-c with Apache License 2.0
private static void initLoggers() {
  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
  GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
 
Example 15
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
private static void initLoggers() {
  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
  GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(BlockReportTestBase.LOG, Level.ALL);
}
 
Example 16
Source File: TestWebHdfsWithMultipleNameNodes.java    From big-c with Apache License 2.0
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);

  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
 
Example 17
Source File: TestPipelines.java    From hadoop with Apache License 2.0
private static void initLoggers() {
  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
  GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
 
Example 18
Source File: BlockReportTestBase.java    From hadoop with Apache License 2.0
private static void initLoggers() {
  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
  GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(BlockReportTestBase.LOG, Level.ALL);
}
 
Example 19
Source File: TestWebHdfsWithMultipleNameNodes.java    From hadoop with Apache License 2.0
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);

  DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
 
Example 20
Source File: TestSecureOzoneCluster.java    From hadoop-ozone with Apache License 2.0
/**
 * Tests delegation token renewal.
 */
@Test
public void testDelegationTokenRenewal() throws Exception {
  GenericTestUtils
      .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO);
  LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());

  // Setup secure OM for start.
  OzoneConfiguration newConf = new OzoneConfiguration(conf);
  int tokenMaxLifetime = 1000;
  newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime);
  setupOm(newConf);
  long omVersion =
      RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
  OzoneManager.setTestSecureOmFlag(true);
  // Start OM

  try {
    om.setCertClient(new CertificateClientTestImpl(conf));
    om.start();

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // Get first OM client which will authenticate via Kerberos
    omClient = new OzoneManagerProtocolClientSideTranslatorPB(
        OmTransportFactory.create(conf, ugi, null),
        RandomStringUtils.randomAscii(5));

    // Since client is already connected get a delegation token
    Token<OzoneTokenIdentifier> token = omClient.getDelegationToken(
        new Text("om"));

    // Check if token is of right kind and renewer is running om instance
    assertNotNull(token);
    assertEquals("OzoneToken", token.getKind().toString());
    assertEquals(OmUtils.getOmRpcAddress(conf),
        token.getService().toString());

    // Renew delegation token
    long expiryTime = omClient.renewDelegationToken(token);
    assertTrue(expiryTime > 0);
    omLogs.clearOutput();

    // Test failure of delegation renewal
    // 1. When token maxExpiryTime exceeds
    Thread.sleep(tokenMaxLifetime);
    OMException ex = LambdaTestUtils.intercept(OMException.class,
        "TOKEN_EXPIRED",
        () -> omClient.renewDelegationToken(token));
    assertEquals(TOKEN_EXPIRED, ex.getResult());
    omLogs.clearOutput();

    // 2. When the renewer doesn't match (this implicitly covers the cases
    // where the renewer is null or empty)
    Token<OzoneTokenIdentifier> token2 = omClient.getDelegationToken(
        new Text("randomService"));
    assertNotNull(token2);
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(token2));
    assertTrue(omLogs.getOutput().contains(" with non-matching " +
        "renewer randomService"));
    omLogs.clearOutput();

    // 3. Test tampered token
    OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf(
        token.getIdentifier());
    tokenId.setRenewer(new Text("om"));
    tokenId.setMaxDate(System.currentTimeMillis() * 2);
    Token<OzoneTokenIdentifier> tamperedToken = new Token<>(
        tokenId.getBytes(), token2.getPassword(), token2.getKind(),
        token2.getService());
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(tamperedToken));
    assertTrue(omLogs.getOutput().contains("can't be found in " +
        "cache"));
    omLogs.clearOutput();

  } finally {
    om.stop();
    om.join();
  }
}