Java Code Examples for org.apache.spark.api.java.JavaSparkContext#sc()

The following examples show how to use org.apache.spark.api.java.JavaSparkContext#sc(). They are taken from open source projects; the source file, project, and license are noted above each example.
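Before diving into the project examples, here is a minimal, self-contained sketch of the pattern they all share: JavaSparkContext#sc() unwraps the Java-friendly wrapper and returns the underlying Scala org.apache.spark.SparkContext, which constructors such as SQLContext(SparkContext) and SparkSession(SparkContext) expect. The class name, application name, and master URL below are placeholders, not taken from any of the projects.

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SQLContext;

public class ScBridgeExample {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("ScBridgeExample") // placeholder application name
                .setMaster("local[1]");        // placeholder master URL
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // sc() returns the underlying Scala SparkContext that the Java wrapper
        // delegates to; APIs written against the Scala type take it directly.
        SparkContext sc = jsc.sc();
        SQLContext sqlContext = new SQLContext(sc);

        jsc.close();
    }
}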
Example 1
Source File: GarmadonSparkListenerIntegrationTest.java    From garmadon with Apache License 2.0
@Before
public void setUp() {
    eventHandler = mock(TriConsumer.class);
    header = Header.newBuilder()
            .withId("id")
            .addTag(Header.Tag.STANDALONE.name())
            .withHostname("host")
            .withUser("user")
            .withPid("pid")
            .buildSerializedHeader();

    SparkListernerConf.getInstance().setConsumer(eventHandler);
    SparkListernerConf.getInstance().setHeader(header);

    jsc = new JavaSparkContext(
            new SparkConf()
                    .setAppName("TestGarmadonListener")
                    .setMaster("local[1]")
                    .set("spark.driver.allowMultipleContexts", "true")
    );
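    // sc() exposes the underlying Scala SparkContext; addSparkListener is defined on that type, not on JavaSparkContext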
    sc = jsc.sc();

    sparkListener = new GarmadonSparkListener();
    sc.addSparkListener(sparkListener);
}
 
Example 2
Source File: TestWebServiceGet.java    From quetzal with Eclipse Public License 2.0
public static void main(String[] args) {
//	SparkConf conf = new SparkConf().setAppName("App-mt").setMaster("local[2]");
//	SparkConf conf = new SparkConf().setAppName("App-mt").setMaster("spark://Kavithas-MBP.home:7077");
	SparkConf conf = new SparkConf().setAppName("App-mt").setMaster("spark://kavithas-mbp.watson.ibm.com:7077");

	JavaSparkContext sc = new JavaSparkContext(conf);

	HiveContext sqlContext = new HiveContext(sc.sc());
	Dataset<Row> urls = sqlContext.read().json("/tmp/urls.json");

	urls.registerTempTable("urls");
	Dataset<Row> temp = sqlContext.sql("select * from urls");
	temp.show();

	sqlContext.sql("add jar /tmp/quetzal.jar");
	sqlContext.sql("create temporary function webservice as 'com.ibm.research.rdf.store.utilities.WebServiceGetUDTF'");
	Dataset<Row> drugs = sqlContext.sql("select webservice(\"drug,id,action\", \"url\", \"\", \"GET\", \"xs=http://www.w3.org/2001/XMLSchema\", \"//row\",\"drug\",\"./drug\","
			+ " \"<string>\", \"id\", \"./id\",\"<string>\", \"action\", \"./action\", \"<string>\", url) as (drug, drug_typ, id, id_typ, action, action_typ) from urls");
	drugs.show();
	System.out.println("Num rows:" + drugs.count());
}
 
Example 3
Source File: CaseWhenTest.java    From BigDataPlatform with GNU General Public License v3.0
public static void main(String[] args) {
	SparkConf conf = new SparkConf()
			.setMaster("local") 
			.setAppName("CaseWhenTest");
	JavaSparkContext sc = new JavaSparkContext(conf);
	SQLContext sqlContext = new SQLContext(sc.sc());
	
	List<Integer> grades = Arrays.asList(85, 90, 60, 73);
	JavaRDD<Integer> gradesRDD = sc.parallelize(grades);
	JavaRDD<Row> gradeRowsRDD = gradesRDD.map(new Function<Integer, Row>() {

		private static final long serialVersionUID = 1L;

		@Override
		public Row call(Integer grade) throws Exception {
			return RowFactory.create(grade);
		}
		
	});
	
	StructType schema = DataTypes.createStructType(Arrays.asList(
			DataTypes.createStructField("grade", DataTypes.IntegerType, true)));
	Dataset<Row> gradesDF = sqlContext.createDataFrame(gradeRowsRDD, schema);
	gradesDF.registerTempTable("grades");

	Dataset<Row>  gradeLevelDF = sqlContext.sql(
			"SELECT CASE "
				+ "WHEN grade>=90 THEN 'A' "
				+ "WHEN grade>=80 THEN 'B' "
				+ "WHEN grade>=70 THEN 'C' "
				+ "WHEN grade>=60 THEN 'D' "
				+ "ELSE 'E' "
				+ "END gradeLevel "
			+ "FROM grades");
	
	gradeLevelDF.show();
	
	sc.close(); 
}
 
Example 4
Source File: IfTest.java    From BigDataPlatform with GNU General Public License v3.0
public static void main(String[] args) {
	SparkConf conf = new SparkConf()
			.setMaster("local") 
			.setAppName("IfTest");
	JavaSparkContext sc = new JavaSparkContext(conf);
	SQLContext sqlContext = new SQLContext(sc.sc());
	
	List<Integer> grades = Arrays.asList(85, 90, 60, 73);
	JavaRDD<Integer> gradesRDD = sc.parallelize(grades);
	JavaRDD<Row> gradeRowsRDD = gradesRDD.map(new Function<Integer, Row>() {

		private static final long serialVersionUID = 1L;

		@Override
		public Row call(Integer grade) throws Exception {
			return RowFactory.create(grade);
		}
		
	});
	
	StructType schema = DataTypes.createStructType(Arrays.asList(
			DataTypes.createStructField("grade", DataTypes.IntegerType, true)));
	Dataset<Row> gradesDF = sqlContext.createDataFrame(gradeRowsRDD, schema);
	gradesDF.registerTempTable("grades");

	Dataset<Row> gradeLevelDF = sqlContext.sql(
			"SELECT IF(grade>=80,'GOOD','BAD') gradeLevel "  
			+ "FROM grades");
	
	gradeLevelDF.show();
	
	sc.close(); 
}
 
Example 5
Source File: HiveDataSource.java    From SparkDemo with MIT License
public static void main(String[] args) {
	
	/*
	 * 0. Copy hive-site.xml from the Hive installation into spark/conf
	 * 1. Start MySQL
	 * 2. Start HDFS
	 * 3. Start Hive: ./hive
	 * 4. Initialize the HiveContext
	 * 5. Package and run:
	 *
	 * ./bin/spark-submit --master yarn-cluster --class com.huangyueran.spark.sql.HiveDataSource /root/spark_hive_datasource.jar
	 * ./bin/spark-submit --master yarn-client --class com.huangyueran.spark.sql.HiveDataSource /root/spark_hive_datasource.jar
	 */

	JavaSparkContext sc = SparkUtils.getRemoteSparkContext(HiveDataSource.class);
	// Create the HiveContext. Note that it takes a SparkContext as its argument, not a JavaSparkContext;
	// a JavaSparkContext would also work, since internally it just performs the same sc.sc() call.
	// HiveContext hiveContext = new HiveContext(sc.sc()); // deprecated; the official recommendation is to use SparkSession
	SparkSession sparkSession = new SparkSession(sc.sc());
	Dataset<Row> person = sparkSession.sql("show databases");
	person.show();

	List<Row> list = person.javaRDD().collect();
	System.out.println("=============================================================");
	for (Row r : list) {
		System.out.println(r);
	}
	System.out.println("=============================================================");
	sc.close();
}
 
Example 6
Source File: SparkRefine.java    From p3-batchrefine with Apache License 2.0
public SparkRefine() {
    LogManager.getRootLogger().setLevel(Level.ERROR);
    fLogger.setLevel(Level.INFO);
    SparkConf sparkConfiguration = new SparkConf(true);
    sparkConfiguration.setAppName(APP_NAME);
    sparkConfiguration.setMaster(sparkConfiguration.get("spark.master", "local"));
    sparkConfiguration.set("spark.task.cpus", sparkConfiguration.get("spark.executor.cores", "1"));
    sparkContext = new JavaSparkContext(sparkConfiguration);
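    // The console progress bar is attached to the underlying Scala SparkContext obtained via sc()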
    new ConsoleProgressBar(sparkContext.sc());
}
 
Example 7
Source File: FlightSparkContext.java    From flight-spark-source with Apache License 2.0
public static FlightSparkContext flightContext(JavaSparkContext sc) {
  return new FlightSparkContext(sc.sc(), sc.getConf());
}
 
Example 8
Source File: SQLQueryBAM.java    From ViraPipe with MIT License
public static void main(String[] args) throws IOException {
  SparkConf conf = new SparkConf().setAppName("SQLQueryBAM");

  JavaSparkContext sc = new JavaSparkContext(conf);
  SQLContext sqlContext = new HiveContext(sc.sc());

  Options options = new Options();
  Option opOpt = new Option( "out", true, "HDFS path for output files. If not present, the output files are not moved to HDFS." );
  Option queryOpt = new Option( "query", true, "SQL query string." );
  Option baminOpt = new Option( "in", true, "" );

  options.addOption( opOpt );
  options.addOption( queryOpt );
  options.addOption( baminOpt );
  CommandLineParser parser = new BasicParser();
  CommandLine cmd = null;
  try {
    cmd = parser.parse( options, args );

  }
  catch( ParseException exp ) {
    System.err.println( "Parsing failed.  Reason: " + exp.getMessage() );
  }

  String bwaOutDir = cmd.hasOption("out") ? cmd.getOptionValue("out") : null;
  String query = cmd.hasOption("query") ? cmd.getOptionValue("query") : null;
  String bamin = cmd.hasOption("in") ? cmd.getOptionValue("in") : null;

  sc.hadoopConfiguration().setBoolean(BAMInputFormat.KEEP_PAIRED_READS_TOGETHER_PROPERTY, true);

  //Read BAM/SAM from HDFS
  JavaPairRDD<LongWritable, SAMRecordWritable> bamPairRDD = sc.newAPIHadoopFile(bamin, AnySAMInputFormat.class, LongWritable.class, SAMRecordWritable.class, sc.hadoopConfiguration());
  //Map to SAMRecord RDD
  JavaRDD<SAMRecord> samRDD = bamPairRDD.map(v1 -> v1._2().get());
  JavaRDD<MyAlignment> rdd = samRDD.map(bam -> new MyAlignment(bam.getReadName(), bam.getStart(), bam.getReferenceName(), bam.getReadLength(), new String(bam.getReadBases(), StandardCharsets.UTF_8), bam.getCigarString(), bam.getReadUnmappedFlag(), bam.getDuplicateReadFlag()));

  Dataset<Row> samDF = sqlContext.createDataFrame(rdd, MyAlignment.class);
  samDF.registerTempTable(tablename);
  if(query!=null) {

    //Save as parquet file
    Dataset df2 = sqlContext.sql(query);
    df2.show(100,false);

    if(bwaOutDir!=null)
      df2.write().parquet(bwaOutDir);

  }else{
    if(bwaOutDir!=null)
      samDF.write().parquet(bwaOutDir);
  }

  sc.stop();

}
 
Example 9
Source File: SparkLauncher.java    From spork with Apache License 2.0
private static void startSparkIfNeeded() throws PigException {
        if (sparkContext == null) {
            String master = System.getenv("SPARK_MASTER");
            if (master == null) {
                LOG.info("SPARK_MASTER not specified, using \"local\"");
                master = "local";
            }

            String sparkHome = System.getenv("SPARK_HOME"); // It's okay if this
            // is null for local
            // mode
            String sparkJarsSetting = System.getenv("SPARK_JARS");
            String pigJar = System.getenv("SPARK_PIG_JAR");
            String[] sparkJars = sparkJarsSetting == null ? new String[] {}
                    : sparkJarsSetting.split(",");

            // TODO: Don't hardcode this JAR
            List<String> jars = Lists.asList(pigJar, sparkJars);

            if (!master.startsWith("local") && !master.equals("yarn-client")) {
                // Check that we have the Mesos native library and Spark home
                // are set
                if (sparkHome == null) {
                    System.err
                            .println("You need to set SPARK_HOME to run on a Mesos cluster!");
                    throw new PigException("SPARK_HOME is not set");
                }
                /*
                 * if (System.getenv("MESOS_NATIVE_LIBRARY") == null) {
                 *
                 * System.err.println(
                 * "You need to set MESOS_NATIVE_LIBRARY to run on a Mesos cluster!"
                 * ); throw new PigException("MESOS_NATIVE_LIBRARY is not set");
                 * }
                 *
                 * // Tell Spark to use Mesos in coarse-grained mode (only
                 * affects Spark 0.6+; no impact on others)
                 * System.setProperty("spark.mesos.coarse", "true");
                 */
            }

//            // For coarse-grained Mesos mode, tell it an upper bound on how many
//            // cores to grab in total;
//            // we conservatively set this to 32 unless the user set the
//            // SPARK_MAX_CPUS environment variable.
//            if (System.getenv("SPARK_MAX_CPUS") != null) {
//                int maxCores = 32;
//                maxCores = Integer.parseInt(System.getenv("SPARK_MAX_CPUS"));
//                System.setProperty("spark.cores.max", "" + maxCores);
//            }
//            System.setProperty("spark.cores.max", "1");
//            System.setProperty("spark.executor.memory", "" + "512m");
//            System.setProperty("spark.shuffle.memoryFraction", "0.0");
//            System.setProperty("spark.storage.memoryFraction", "0.0");

            JavaSparkContext javaContext = new JavaSparkContext(master,
                    "Spork", sparkHome, jars.toArray(new String[jars.size()]));
            sparkContext = javaContext.sc();
            sparkContext.addSparkListener(new StatsReportListener());
            sparkContext.addSparkListener(new JobLogger());
            // cacheConverter = new CacheConverter();
        }
    }
 
Example 10
Source File: SparkDl4jMultiLayer.java    From deeplearning4j with Apache License 2.0
/**
 * Training constructor. Instantiate with a configuration.
 *
 * @param sc             the spark context to use
 * @param conf           the configuration of the network
 * @param trainingMaster the TrainingMaster that defines how distributed training is performed
 */
public SparkDl4jMultiLayer(JavaSparkContext sc, MultiLayerConfiguration conf, TrainingMaster<?, ?> trainingMaster) {
    this(sc.sc(), conf, trainingMaster);
}