Java Code Examples for org.apache.log4j.LogManager#getRootLogger()

The following examples show how to use org.apache.log4j.LogManager#getRootLogger(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
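Before the project examples, here is a minimal, self-contained sketch of the call itself: it fetches the log4j 1.x root logger, raises its level, and attaches a console appender. The appender name and layout pattern are illustrative choices, not taken from any of the projects below.

import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class RootLoggerDemo {
    public static void main(String[] args) {
        // Every logger in the log4j 1.x hierarchy ultimately inherits from the root logger.
        Logger root = LogManager.getRootLogger();

        // Child loggers without an explicit level inherit this threshold.
        root.setLevel(Level.WARN);

        // Attach a console appender (name and pattern are illustrative).
        ConsoleAppender console = new ConsoleAppender(new PatternLayout("%d %-5p %c - %m%n"));
        console.setName("console");
        root.addAppender(console);

        root.warn("root logger configured");
    }
}
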
Example 1
Source File: Log4jLoggerAdapter.java    From dubbox with Apache License 2.0
@SuppressWarnings("unchecked")
public Log4jLoggerAdapter() {
    try {
        org.apache.log4j.Logger logger = LogManager.getRootLogger();
        if (logger != null) {
            Enumeration<Appender> appenders = logger.getAllAppenders();
            if (appenders != null) {
                while (appenders.hasMoreElements()) {
                    Appender appender = appenders.nextElement();
                    if (appender instanceof FileAppender) {
                        FileAppender fileAppender = (FileAppender) appender;
                        String filename = fileAppender.getFile();
                        file = new File(filename);
                        break;
                    }
                }
            }
        }
    } catch (Throwable t) {
        // Ignore: if the root logger cannot be inspected, the adapter simply has no log file.
    }
}
 
Example 2
Source File: LogPageHandler.java    From dubbox with Apache License 2.0
@SuppressWarnings("unchecked")
public LogPageHandler() {
    try {
        org.apache.log4j.Logger logger = LogManager.getRootLogger();
        if (logger != null) {
            Enumeration<Appender> appenders = logger.getAllAppenders();
            if (appenders != null) {
                while (appenders.hasMoreElements()) {
                    Appender appender = appenders.nextElement();
                    if (appender instanceof FileAppender) {
                        FileAppender fileAppender = (FileAppender) appender;
                        String filename = fileAppender.getFile();
                        file = new File(filename);
                        break;
                    }
                }
            }
        }
    } catch (Throwable t) {
        // Ignore: if the root logger cannot be inspected, no log file is exposed.
    }
}
 
Example 3
Source File: LoggerUtil.java    From attic-apex-core with Apache License 2.0
/**
 * Adds Logger Appenders
 * @param logger Logger to add the appenders to; if null, the root logger is used
 * @param names Names of the appenders to add
 * @param args Appender properties as key=value pairs, separated by propertySeparator
 * @param propertySeparator Separator between the properties in args
 * @return True if all of the appenders have been added successfully
 */
public static boolean addAppenders(Logger logger, String[] names, String args, String propertySeparator)
{
  if (names == null || args == null || names.length == 0 || propertySeparator == null) {
    throw new IllegalArgumentException("Incorrect appender parameters");
  }
  boolean status = true;
  try {
    Properties properties = new Properties();
    properties.load(new StringReader(args.replaceAll(propertySeparator, "\n")));
    if (logger == null) {
      logger = LogManager.getRootLogger();
    }
    for (String name : names) {
      if (!addAppender(logger, name, properties)) {
        status = false;
      }
    }
  } catch (IOException ex) {
    // Ignore: if the properties string cannot be parsed, no appenders are added.
  }
  return status;
}
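A hypothetical invocation, for illustration only: the appender name and the property keys in args are assumptions, since the addAppender helper that consumes them is not shown in this example. What the code above does guarantee is that args is a list of key=value pairs separated by propertySeparator, and that passing null for logger targets the root logger.

// Illustrative only: the property keys below are assumed, not taken from the project.
String[] names = {"console"};
String args = "log4j.appender.console=org.apache.log4j.ConsoleAppender;"
    + "log4j.appender.console.layout=org.apache.log4j.SimpleLayout";
boolean added = LoggerUtil.addAppenders(null, names, args, ";"); // null targets the root logger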
 
Example 4
Source File: Log4jLoggerAdapter.java    From dubbox-hystrix with Apache License 2.0
@SuppressWarnings("unchecked")
public Log4jLoggerAdapter() {
    try {
        org.apache.log4j.Logger logger = LogManager.getRootLogger();
        if (logger != null) {
            Enumeration<Appender> appenders = logger.getAllAppenders();
            if (appenders != null) {
                while (appenders.hasMoreElements()) {
                    Appender appender = appenders.nextElement();
                    if (appender instanceof FileAppender) {
                        FileAppender fileAppender = (FileAppender) appender;
                        String filename = fileAppender.getFile();
                        file = new File(filename);
                        break;
                    }
                }
            }
        }
    } catch (Throwable t) {
        // Ignore: if the root logger cannot be inspected, the adapter simply has no log file.
    }
}
 
Example 5
Source File: WordCountTransformOpEx.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) throws Exception {
  
      System.setProperty("hadoop.home.dir", "E:\\hadoop");
	
   SparkConf sparkConf = new SparkConf().setAppName("WordCountSocketEx").setMaster("local[*]");
   JavaStreamingContext streamingContext = new JavaStreamingContext(sparkConf, Durations.seconds(1));
   Logger rootLogger = LogManager.getRootLogger();
 		rootLogger.setLevel(Level.WARN); 
   List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<>("hello", 10), new Tuple2<>("world", 10));
   JavaPairRDD<String, Integer> initialRDD = streamingContext.sparkContext().parallelizePairs(tuples);
	    

   JavaReceiverInputDStream<String> StreamingLines = streamingContext.socketTextStream( "10.0.75.1", Integer.parseInt("9000"), StorageLevels.MEMORY_AND_DISK_SER);
   
   JavaDStream<String> words = StreamingLines.flatMap( str -> Arrays.asList(str.split(" ")).iterator() );
  
   JavaPairDStream<String, Integer> wordCounts = words.mapToPair(str-> new Tuple2<>(str, 1)).reduceByKey((count1,count2) ->count1+count2 );
  
   wordCounts.print();
   
JavaPairDStream<String, Integer> joinedDstream = wordCounts
		.transformToPair(new Function<JavaPairRDD<String, Integer>, JavaPairRDD<String, Integer>>() {
			@Override
			public JavaPairRDD<String, Integer> call(JavaPairRDD<String, Integer> rdd) throws Exception {
				JavaPairRDD<String, Integer> modRDD = rdd.join(initialRDD).mapToPair(
						new PairFunction<Tuple2<String, Tuple2<Integer, Integer>>, String, Integer>() {
							@Override
							public Tuple2<String, Integer> call(
									Tuple2<String, Tuple2<Integer, Integer>> joinedTuple) throws Exception {
								return new Tuple2<>(joinedTuple._1(),(joinedTuple._2()._1() + joinedTuple._2()._2()));
							}
						});
				return modRDD;
			}
		});

   joinedDstream.print();
   streamingContext.start();
   streamingContext.awaitTermination();
 }
 
Example 6
Source File: InferenceExamples.java    From rya with Apache License 2.0
public static void setupLogging() {
    final Logger rootLogger = LogManager.getRootLogger();
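    // Assumes the log4j configuration defines a ConsoleAppender named "stdout" on the root logger;
    // otherwise getAppender returns null and the setLayout call below throws a NullPointerException.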
    final ConsoleAppender ca = (ConsoleAppender) rootLogger.getAppender("stdout");
    ca.setLayout(new PatternLayout("%d{MMM dd yyyy HH:mm:ss} %5p [%t] (%F:%L) - %m%n"));
    rootLogger.setLevel(Level.INFO);
    // Filter out noisy messages from the following classes.
    Logger.getLogger(ClientCnxn.class).setLevel(Level.OFF);
    Logger.getLogger(EmbeddedMongoFactory.class).setLevel(Level.OFF);
}
 
Example 7
Source File: SFTPFileSystemLoggingTest.java    From sftp-fs with Apache License 2.0
@BeforeAll
public static void setupLogging() {
    logger = LogManager.getLogger(SFTPLoggerTest.class.getPackage().getName());
    originalLevel = logger.getLevel();
    logger.setLevel(Level.TRACE);
    originalAppenders = getAllAppenders(logger);
    logger.removeAllAppenders();

    Logger root = LogManager.getRootLogger();
    originalRootAppenders = getAllAppenders(root);
    root.removeAllAppenders();
}
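The getAllAppenders(Logger) helper used above is not part of this snippet. A minimal sketch of what such a helper might look like, assuming it collects the logger's appenders into a list (it uses java.util.ArrayList, java.util.Enumeration, java.util.List, and org.apache.log4j.Appender; the project's real implementation may differ):

// Sketch only: the project's actual helper is not shown in this example.
@SuppressWarnings("unchecked")
private static List<Appender> getAllAppenders(Logger logger) {
    List<Appender> appenders = new ArrayList<>();
    Enumeration<Appender> enumeration = logger.getAllAppenders();
    while (enumeration.hasMoreElements()) {
        appenders.add(enumeration.nextElement());
    }
    return appenders;
}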
 
Example 8
Source File: TestWorkPreservingRMRestart.java    From big-c with Apache License 2.0
@Before
public void setup() throws UnknownHostException {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf = getConf();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
  DefaultMetricsSystem.setMiniClusterMode(true);
}
 
Example 9
Source File: TestRMDelegationTokens.java    From hadoop with Apache License 2.0
@Before
public void setup() {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  ExitUtil.disableSystemExit();
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
}
 
Example 10
Source File: FileStreamingEx.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
   	//Windows-specific property if Hadoop is not installed or HADOOP_HOME is not set
	 System.setProperty("hadoop.home.dir", "E:\\hadoop");
   	//Logger rootLogger = LogManager.getRootLogger();
  		//rootLogger.setLevel(Level.WARN); 
       SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
       String inputDirectory="E:\\hadoop\\streamFolder\\";
    
       JavaSparkContext sc = new JavaSparkContext(conf);
       JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(1));
      // streamingContext.checkpoint("E:\\hadoop\\checkpoint");
       Logger rootLogger = LogManager.getRootLogger();
  		rootLogger.setLevel(Level.WARN); 
  		
  		JavaDStream<String> streamfile = streamingContext.textFileStream(inputDirectory);
  		streamfile.print();
  		streamfile.foreachRDD(rdd-> rdd.foreach(x -> System.out.println(x)));
  		
  			   		
  		JavaPairDStream<LongWritable, Text> streamedFile = streamingContext.fileStream(inputDirectory, LongWritable.class, Text.class, TextInputFormat.class);
  	 streamedFile.print();
  		
  	 streamingContext.start();
  	 

       try {
		streamingContext.awaitTermination();
	} catch (InterruptedException e) {
		// Interrupted while waiting for the streaming job to finish.
		e.printStackTrace();
	}
}
 
Example 11
Source File: TestRM.java    From big-c with Apache License 2.0
@Test
public void testGetNewAppId() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM(conf);
  rm.start();
  
  GetNewApplicationResponse resp = rm.getNewAppId();
  assert (resp.getApplicationId().getId() != 0);    
  assert (resp.getMaximumResourceCapability().getMemory() > 0);    
  rm.stop();
}
 
Example 12
Source File: XMLFileOperations.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
	 System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
		
      SparkSession sparkSession = SparkSession
      .builder()
      .master("local")
	  .config("spark.sql.warehouse.dir","file:///E:/sumitK/Hadoop/warehouse")
      .appName("JavaALSExample")
      .getOrCreate();
      Logger rootLogger = LogManager.getRootLogger();
		rootLogger.setLevel(Level.WARN); 

	
	HashMap<String, String> params = new HashMap<String, String>();
	params.put("rowTag", "food");
	params.put("failFast", "true");
	 Dataset<Row> docDF = sparkSession.read()
			                   .format("com.databricks.spark.xml")
			                   .options(params)
			                   .load("C:/Users/sumit.kumar/git/learning/src/main/resources/breakfast_menu.xml");
	 
	 docDF.printSchema();		 
	 docDF.show();
	 
	 docDF.write().format("com.databricks.spark.xml")
	    .option("rootTag", "food")
	    .option("rowTag", "food")
	    .save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMenu.xml");

}
 
Example 13
Source File: TestContainerResourceUsage.java    From hadoop with Apache License 2.0
@Before
public void setup() throws UnknownHostException {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
}
 
Example 14
Source File: LoggerUtil.java    From Bats with Apache License 2.0
/**
 * Returns the names of the appenders attached to a logger
 * @param logger Logger to list appenders for; if null, the root logger is used
 * @return Names of the appenders
 */
public static List<String> getAppendersNames(Logger logger)
{
  if (logger == null) {
    logger = LogManager.getRootLogger();
  }
  Enumeration enumeration = logger.getAllAppenders();
  List<String> names = new LinkedList<>();
  while (enumeration.hasMoreElements()) {
    names.add(((Appender)enumeration.nextElement()).getName());
  }
  return names;
}
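For instance, passing null lists the appenders attached to the root logger (hypothetical usage, not taken from the project):

// List the names of the appenders attached to the root logger.
List<String> rootAppenderNames = LoggerUtil.getAppendersNames(null);
System.out.println("Root appenders: " + rootAppenderNames);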
 
Example 15
Source File: LoggerUtil.java    From attic-apex-core with Apache License 2.0
public static LogFileInformation getLogFileInformation(Logger logger)
{
  if (logger == null) {
    logger = LogManager.getRootLogger();
  }
  FileAppender fileAppender = getFileAppender(logger);
  if (fileAppender != null) {
    File logFile = new File(fileAppender.getFile());
    LogFileInformation logFileInfo = new LogFileInformation(fileAppender.getFile(), logFile.length());
    return logFileInfo;
  }
  return null;
}
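The getFileAppender helper is not included in this snippet. A minimal sketch of what it might look like, assuming it simply returns the first FileAppender attached to the logger, the same scan done inline in the Log4jLoggerAdapter examples above (the project's real helper may differ):

// Sketch only: the project's actual helper is not shown in this example.
@SuppressWarnings("unchecked")
private static FileAppender getFileAppender(Logger logger)
{
  Enumeration<Appender> appenders = logger.getAllAppenders();
  while (appenders.hasMoreElements()) {
    Appender appender = appenders.nextElement();
    if (appender instanceof FileAppender) {
      return (FileAppender)appender;
    }
  }
  return null;
}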
 
Example 16
Source File: ThreadLocalLogLevelManager.java    From olat with Apache License 2.0
/**
 * Installs the ThreadLogManager in this system.
 * <p>
 * Note that this can fail if another framework has already called LogManager.setRepositorySelector with a guard.
 * 
 * @param logMessageModifier
 *            optional LogMessageModifier implementation that allows messages to be modified when they are affected by a thread-local log-level override.
 *            For example, messages can be prefixed with a token so that they are easier to find in the log.
 */
void install(final LogMessageModifier logMessageModifier) {

    try {
        final LoggerFactory loggerFactory = new LoggerFactory() {

            @SuppressWarnings("synthetic-access")
            @Override
            public Logger makeNewLoggerInstance(String name) {
                return new ThreadLocalAwareLogger(name, threadLocalLogLevel_, logMessageModifier);
            }
        };

        final Logger originalRootLogger = LogManager.getRootLogger();

        final LoggerRepository parentRepository = originalRootLogger.getLoggerRepository();

        final LoggerRepository repository = new ThreadLocalAwareLoggerRepository(originalRootLogger, parentRepository, loggerFactory);

        LogManager.setRepositorySelector(new RepositorySelector() {

            @Override
            public LoggerRepository getLoggerRepository() {
                return repository;
            }
        }, guard);
    } catch (IllegalArgumentException re) {
        // thrown by LogManager.setRepositorySelector
        log.error("Could not install ThreadLocalLogLevelManager", re);
    }

}
 
Example 17
Source File: WindowBatchInterval.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
   	//Windows-specific property if Hadoop is not installed or HADOOP_HOME is not set
	 System.setProperty("hadoop.home.dir", "E:\\hadoop");
   	//Logger rootLogger = LogManager.getRootLogger();
  		//rootLogger.setLevel(Level.WARN); 
       SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
       
    
       JavaSparkContext sc = new JavaSparkContext(conf);
       JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.minutes(2));
       streamingContext.checkpoint("E:\\hadoop\\checkpoint");
       Logger rootLogger = LogManager.getRootLogger();
  		rootLogger.setLevel(Level.WARN); 
  		
  	 List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<>("hello", 10), new Tuple2<>("world", 10));
    JavaPairRDD<String, Integer> initialRDD = streamingContext.sparkContext().parallelizePairs(tuples);
		    

    JavaReceiverInputDStream<String> StreamingLines = streamingContext.socketTextStream( "10.0.75.1", Integer.parseInt("9000"), StorageLevels.MEMORY_AND_DISK_SER);
    
    JavaDStream<String> words = StreamingLines.flatMap( str -> Arrays.asList(str.split(" ")).iterator() );
   
    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(str-> new Tuple2<>(str, 1)).reduceByKey((count1,count2) ->count1+count2 );
   
    wordCounts.print();
    wordCounts.window(Durations.minutes(8)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
    wordCounts.window(Durations.minutes(8),Durations.minutes(2)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
    wordCounts.window(Durations.minutes(12),Durations.minutes(8)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
    wordCounts.window(Durations.minutes(2),Durations.minutes(2)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
    wordCounts.window(Durations.minutes(12),Durations.minutes(12)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
      
    //comment these two operation to make it run
    wordCounts.window(Durations.minutes(5),Durations.minutes(2)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
    wordCounts.window(Durations.minutes(10),Durations.minutes(1)).countByValue()
      .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
      
       streamingContext.start();
       try {
		streamingContext.awaitTermination();
	} catch (InterruptedException e) {
		// Interrupted while waiting for the streaming job to finish.
		e.printStackTrace();
	}
}
 
Example 18
Source File: JsonFileOperations.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
	System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
	Logger rootLogger = LogManager.getRootLogger();
	rootLogger.setLevel(Level.WARN); 
	      SparkSession sparkSession = SparkSession
	      .builder()
	      .master("local")
		  .config("spark.sql.warehouse.dir","file:///E:/sumitK/Hadoop/warehouse")
	      .appName("JavaALSExample")
	      .getOrCreate();
	      
	   RDD<String> textFile = sparkSession.sparkContext().textFile("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_json.json",2); 
	   
	   JavaRDD<PersonDetails> mapParser = textFile.toJavaRDD().map(v1 -> new ObjectMapper().readValue(v1, PersonDetails.class));
	   
	   mapParser.foreach(t -> System.out.println(t)); 
	  
	   Dataset<Row> anotherPeople = sparkSession.read().json(textFile);
	   
	   anotherPeople.printSchema();
	   anotherPeople.show();
	      
	      
	      Dataset<Row> json_rec = sparkSession.read().json("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_json.json");
	      json_rec.printSchema();
	      
	      json_rec.show();
	      
	      StructType schema = new StructType( new StructField[] {
	    	            DataTypes.createStructField("cid", DataTypes.IntegerType, true),
	    	            DataTypes.createStructField("county", DataTypes.StringType, true),
	    	            DataTypes.createStructField("firstName", DataTypes.StringType, true),
	    	            DataTypes.createStructField("sex", DataTypes.StringType, true),
	    	            DataTypes.createStructField("year", DataTypes.StringType, true),
	    	            DataTypes.createStructField("dateOfBirth", DataTypes.TimestampType, true) });
	      
	    /*  StructType pep = new StructType(new StructField[] {
					new StructField("Count", DataTypes.StringType, true, Metadata.empty()),
					new StructField("County", DataTypes.StringType, true, Metadata.empty()),
					new StructField("First Name", DataTypes.StringType, true, Metadata.empty()),
					new StructField("Sex", DataTypes.StringType, true, Metadata.empty()),
					new StructField("Year", DataTypes.StringType, true, Metadata.empty()),
				    new StructField("timestamp", DataTypes.TimestampType, true, Metadata.empty()) });*/
	      
	     Dataset<Row> person_mod = sparkSession.read().schema(schema).json(textFile);
	     
	     person_mod.printSchema();
	     person_mod.show();
	     
	     person_mod.write().format("json").mode("overwrite").save("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_out.json");

}
 
Example 19
Source File: UDFExample.java    From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
	//Windows-specific property if Hadoop is not installed or HADOOP_HOME is not set
	 System.setProperty("hadoop.home.dir", "E:\\hadoop");
	
	 //Build a Spark Session	
      SparkSession sparkSession = SparkSession
      .builder()
      .master("local")
	  .config("spark.sql.warehouse.dir","file:///E:/hadoop/warehouse")
      .appName("EdgeBuilder")
      .getOrCreate();
      Logger rootLogger = LogManager.getRootLogger();
	  rootLogger.setLevel(Level.WARN); 
	// Read the CSV data
		 Dataset<Row> emp_ds = sparkSession.read()
				 .format("com.databricks.spark.csv")
   		         .option("header", "true")
   		         .option("inferSchema", "true")
   		         .load("src/main/resources/employee.txt");    
    		
	    UDF2 calcDays=new CalcDaysUDF();
	  //Registering the UDFs in Spark Session created above      
	    sparkSession.udf().register("calcDays", calcDays, DataTypes.LongType);
	    
	    emp_ds.createOrReplaceTempView("emp_ds");
	    
	    emp_ds.printSchema();
	    emp_ds.show();
	    
	    sparkSession.sql("select calcDays(hiredate,'dd-MM-yyyy') from emp_ds").show();   
	    //Instantiate UDAF
	    AverageUDAF calcAvg= new AverageUDAF();
	    //Register UDAF to SparkSession
	    sparkSession.udf().register("calAvg", calcAvg);
	    //Use UDAF
	    sparkSession.sql("select deptno,calAvg(salary) from emp_ds group by deptno ").show(); 
	   
	    //
	    TypeSafeUDAF typeSafeUDAF=new TypeSafeUDAF();
	    
	    Dataset<Employee> emf = emp_ds.as(Encoders.bean(Employee.class));
	    emf.printSchema();
	    emf.show();
	    
	    TypedColumn<Employee, Double> averageSalary = typeSafeUDAF.toColumn().name("averageTypeSafe");
	    Dataset<Double> result = emf.select(averageSalary);
	   result.show();
	    

}