Java Code Examples for org.apache.spark.streaming.api.java.JavaDStream.print()

The following are Java code examples showing how to use print() of the org.apache.spark.streaming.api.java.JavaDStream class. You can vote up the examples you find useful; your votes help surface the best examples.
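Before the project examples, here is a minimal, self-contained sketch of the API: print() is an output operation that writes the first ten elements of every batch to the driver's stdout, and the print(int num) overload prints the first num elements instead. The class name, input directory, and batch interval below are illustrative assumptions, not taken from any of the projects listed.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class JavaDStreamPrintSketch {
	public static void main(String[] args) throws InterruptedException {
		// Local context with two worker threads and a 5-second batch interval (illustrative values)
		SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("JavaDStreamPrintSketch");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

		// Hypothetical input directory; files dropped here become one RDD per batch
		JavaDStream<String> lines = jssc.textFileStream("/tmp/stream-input");

		// Prints the first ten elements of every batch to the driver console
		lines.print();
		// Overload: prints the first 25 elements of every batch
		lines.print(25);

		jssc.start();
		jssc.awaitTermination();
	}
}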
Example 1
Project: net.jgp.labs.spark   File: StreamingIngestionFileSystemTextFileApp.java   Source Code and License Vote up 6 votes
private void start() {
	// Create a local StreamingContext with two working threads and a batch
	// interval of 5 seconds
	SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount");
	JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

	JavaDStream<String> msgDataStream = jssc.textFileStream(StreamingUtils.getInputDirectory());
	msgDataStream.print();

	jssc.start();
	try {
		jssc.awaitTermination();
	} catch (InterruptedException e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Example 2
Project: net.jgp.labs.spark   File: StreamingIngestionFileSystemTextFileToDataframeMultipleClassesApp.java   Source Code and License Vote up 6 votes
private void start() {
	// Create a local StreamingContext with two working threads and a batch
	// interval of 5 seconds
	SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("Streaming Ingestion File System Text File to Dataframe");
	JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

	JavaDStream<String> msgDataStream = jssc.textFileStream(StreamingUtils.getInputDirectory());

	msgDataStream.print();
	// Create JavaRDD<Row>
	msgDataStream.foreachRDD(new RowProcessor());	

	jssc.start();
	try {
		jssc.awaitTermination();
	} catch (InterruptedException e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Example 3
Project: learning-spark-examples   File: StreamingLogInput.java   Source Code and License Vote up 6 votes
public static void main(String[] args) throws Exception {
	String master = args[0];
	JavaSparkContext sc = new JavaSparkContext(master, "StreamingLogInput");
   // Create a StreamingContext with a 1 second batch size
   JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(1000));
   // Create a DStream from all the input on port 7777
   JavaDStream<String> lines = jssc.socketTextStream("localhost", 7777);
   // Filter our DStream for lines with "error"
   JavaDStream<String> errorLines = lines.filter(new Function<String, Boolean>() {
       public Boolean call(String line) {
         return line.contains("error");
       }});
   // Print out the lines with errors, which causes this DStream to be evaluated
   errorLines.print();
   // start our streaming context and wait for it to "finish"
   jssc.start();
   // Wait for 10 seconds then exit. To run forever call without a timeout
   jssc.awaitTermination(10000);
   // Stop the streaming context
   jssc.stop();
}
 
Example 4
Project: Apache-Spark-2x-for-Java-Developers   File: FileStreamingEx.java   Source Code and License Vote up 5 votes
public static void main(String[] args) {
	// Windows-specific property if Hadoop is not installed or HADOOP_HOME is not set
	System.setProperty("hadoop.home.dir", "E:\\hadoop");
	// Logger rootLogger = LogManager.getRootLogger();
	// rootLogger.setLevel(Level.WARN);
	SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
	String inputDirectory = "E:\\hadoop\\streamFolder\\";

	JavaSparkContext sc = new JavaSparkContext(conf);
	JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(1));
	// streamingContext.checkpoint("E:\\hadoop\\checkpoint");
	Logger rootLogger = LogManager.getRootLogger();
	rootLogger.setLevel(Level.WARN);

	JavaDStream<String> streamfile = streamingContext.textFileStream(inputDirectory);
	streamfile.print();
	streamfile.foreachRDD(rdd -> rdd.foreach(x -> System.out.println(x)));

	JavaPairDStream<LongWritable, Text> streamedFile = streamingContext.fileStream(inputDirectory, LongWritable.class, Text.class, TextInputFormat.class);
	streamedFile.print();

	streamingContext.start();

	try {
		streamingContext.awaitTermination();
	} catch (InterruptedException e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Example 5
Project: incubator-pulsar   File: SparkStreamingPulsarReceiverExample.java   Source Code and License Vote up 5 votes
public static void main(String[] args) throws InterruptedException {
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("pulsar-spark");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

    ClientConfiguration clientConf = new ClientConfiguration();
    ConsumerConfiguration consConf = new ConsumerConfiguration();
    String url = "pulsar://localhost:6650/";
    String topic = "persistent://sample/standalone/ns1/topic1";
    String subs = "sub1";

    JavaReceiverInputDStream<byte[]> msgs = jssc
            .receiverStream(new SparkStreamingPulsarReceiver(clientConf, consConf, url, topic, subs));

    JavaDStream<Integer> isContainingPulsar = msgs.flatMap(new FlatMapFunction<byte[], Integer>() {
        @Override
        public Iterator<Integer> call(byte[] msg) {
            return Arrays.asList(((new String(msg)).indexOf("Pulsar") != -1) ? 1 : 0).iterator();
        }
    });

    JavaDStream<Integer> numOfPulsar = isContainingPulsar.reduce(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    numOfPulsar.print();

    jssc.start();
    jssc.awaitTermination();
}
 
Example 6
Project: nats-connector-spark   File: AbstractNatsToSparkTest.java   Source Code and License Vote up 5 votes
protected void validateTheReceptionOfMessages(JavaStreamingContext ssc,
		JavaReceiverInputDStream<String> stream) throws InterruptedException {
	JavaDStream<String> messages = stream.repartition(3);

	ExecutorService executor = Executors.newFixedThreadPool(6);

	final int nbOfMessages = 5;
	NatsPublisher np = getNatsPublisher(nbOfMessages);
	
	if (logger.isDebugEnabled()) {
		messages.print();
	}
	
	messages.foreachRDD(new VoidFunction<JavaRDD<String>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public void call(JavaRDD<String> rdd) throws Exception {
			logger.debug("RDD received: {}", rdd.collect());
			
			final long count = rdd.count();
			if ((count != 0) && (count != nbOfMessages)) {
				rightNumber = false;
				logger.error("The number of messages received should have been {} instead of {}.", nbOfMessages, count);
			}
			
			TOTAL_COUNT.getAndAdd((int) count);
			
			atLeastSomeData = atLeastSomeData || (count > 0);
			
			for (String str : rdd.collect()) {
				if (!str.startsWith(NatsPublisher.NATS_PAYLOAD)) {
					payload = str;
				}
			}
		}			
	});
	
	closeTheValidation(ssc, executor, nbOfMessages, np);		
}
 
Example 7
Project: nats-connector-spark   File: AbstractNatsToSparkTest.java   Source Code and License Vote up 5 votes
protected void validateTheReceptionOfIntegerMessages(JavaStreamingContext ssc, 
		JavaReceiverInputDStream<Integer> stream) throws InterruptedException {
	JavaDStream<Integer> messages = stream.repartition(3);

	ExecutorService executor = Executors.newFixedThreadPool(6);

	final int nbOfMessages = 5;
	NatsPublisher np = getNatsPublisher(nbOfMessages);
	
	if (logger.isDebugEnabled()) {
		messages.print();
	}
	
	messages.foreachRDD(new VoidFunction<JavaRDD<Integer>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public void call(JavaRDD<Integer> rdd) throws Exception {
			logger.debug("RDD received: {}", rdd.collect());
			
			final long count = rdd.count();
			if ((count != 0) && (count != nbOfMessages)) {
				rightNumber = false;
				logger.error("The number of messages received should have been {} instead of {}.", nbOfMessages, count);
			}
			
			TOTAL_COUNT.getAndAdd((int) count);
			
			atLeastSomeData = atLeastSomeData || (count > 0);
			
			for (Integer value : rdd.collect()) {
				if (value < NatsPublisher.NATS_PAYLOAD_INT) {
					payload = value.toString();
				}
			}
		}			
	});
	
	closeTheValidation(ssc, executor, nbOfMessages, np);
}
 
Example 8
Project: spark-cstar-canaries   File: Consumer.java   Source Code and License Vote up 5 votes
public void start() {
    final JavaStreamingContext context = new JavaStreamingContext(conf, checkpointInterval);

    // for graceful shutdown of the application ...
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            System.out.println("Shutting down streaming app...");
            context.stop(true, true);
            System.out.println("Shutdown of streaming app complete.");
        }
    });

    JKinesisReceiver receiver = new JKinesisReceiver(appName, streamName,
                                                     endpointUrl, regionName,
                                                     checkpointInterval,
                                                     InitialPositionInStream.LATEST);

    JavaDStream<String> dstream = context.receiverStream(receiver);

    JavaDStream<EventRecord> recs = dstream.map(new EventRecordMapFunc());

    recs.print();

    // persist to DStream to Cassandra
    javaFunctions(recs)
        .writerBuilder("canary", "eventrecord", mapToRow(EventRecord.class))
        .saveToCassandra();


    System.out.println("Start Spark Stream Processing...");

    context.start();
    context.awaitTermination();

}
 
Example 9
Project: SparkOnALog   File: SparkStreamingFromNetworkExample.java   Source Code and License Vote up 5 votes
public static void main(String[] args) {
  if (args.length < 3) {
    System.err.println("Usage: NetworkWordCount <master> <hostname> <port>\n" +
        "In local mode, <master> should be 'local[n]' with n > 1");
    System.exit(1);
  }

  // Create the context with a 5 second batch size
  JavaStreamingContext ssc = new JavaStreamingContext(args[0], "NetworkWordCount",
          new Duration(5000), System.getenv("SPARK_HOME"), System.getenv("SPARK_EXAMPLES_JAR"));

  // Create a NetworkInputDStream on target ip:port and count the
  // words in the input stream of \n-delimited text (e.g. generated by 'nc')
  JavaDStream<String> lines = ssc.socketTextStream(args[1], Integer.parseInt(args[2]));
  
  lines.map(new Function<String, String>() {
      @Override
      public String call(String arg0) throws Exception {
          System.out.println("arg0" + arg0);
          return arg0;
      }
  }).print();
  
  lines.print();
  ssc.start();


}
 
Example 10
Project: Practical-Real-time-Processing-and-Analytics   File: JavaKafkaWordCount.java   Source Code and License Vote up 4 votes
@SuppressWarnings("serial")
public static void main(String[] args) throws InterruptedException {
//    if (args.length < 4) {
//      System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
//      System.exit(1);
//    }
	  args = new String[4];
    args[0]="localhost:2181";
    args[1]= "1";
    args[2]= "test";
    args[3]= "1";

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount").setMaster("spark://Impetus-NL163U:7077");
    // Create the context with a 20 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(20000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic: topics) {
      topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages =
            KafkaUtils.createStream(jssc, args[0], args[1], topicMap);

    final JavaDStream<String> lines = messages.map(new Function<Tuple2<String,String>, String>() {
		@Override
		public String call(Tuple2<String, String> v1) throws Exception {
			ObjectMapper objectMapper = new ObjectMapper();
			objectMapper.configure(Feature.USE_ANNOTATIONS, false);
			Map<String,String> mapValue = objectMapper.readValue(v1._2(), new TypeReference<Map<String,String>>() {
			});
			Collection<String> values = mapValue.values();
			String finalString = "";
			for (Iterator<String> iterator = values.iterator(); iterator.hasNext();) {
				String value = iterator.next();
				if (finalString.length() == 0) {
					finalString = finalString + value;
				} else {
					finalString = finalString + "," + value;
				}
			}
			return finalString;
		}
	});
    
    lines.print();
    new Thread(){
    	public void run() {
    		while(true){
    			try {
					Thread.sleep(1000);
				} catch (InterruptedException e) {
					// TODO Auto-generated catch block
					e.printStackTrace();
				}
    			System.out.println("#############################################################################"+lines.count());
    		}
    	};
    }.start();
    
    jssc.start();
    jssc.awaitTermination();
  }
 
Example 11
Project: Sparkathon   File: SQLonStreams.java   Source Code and License Vote up 4 votes
public static void main(String[] args) throws Exception {
    Logger.getLogger("org").setLevel(Level.WARN);
    Logger.getLogger("akka").setLevel(Level.WARN);

    final Pattern SPACE = Pattern.compile(" ");

    SparkConf conf = new SparkConf().setAppName("Big Apple").setMaster("local[2]");
    JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));

    JavaDStream<String> lines = ssc.textFileStream("src/main/resources/stream");
    lines.print();

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterator<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x)).iterator();
        }
    });

    words.foreachRDD(
            new VoidFunction2<JavaRDD<String>, Time>() {
                @Override
                public void call(JavaRDD<String> rdd, Time time) {

                    // Get the singleton instance of SQLContext
                    SQLContext sqlContext = SQLContext.getOrCreate(rdd.context());

                    // Convert RDD[String] to RDD[case class] to Dataset
                    JavaRDD<JavaRecord> rowRDD = rdd.map(new Function<String, JavaRecord>() {
                        public JavaRecord call(String word) {
                            JavaRecord record = new JavaRecord();
                            record.setWord(word);
                            return record;
                        }
                    });
                    Dataset<Row> wordsDataset = sqlContext.createDataFrame(rowRDD, JavaRecord.class);

                    // Register as table
                    wordsDataset.registerTempTable("words");

                    // Do word count on table using SQL and print it
                    Dataset wordCountsDataset =
                            sqlContext.sql("select word, count(*) as total from words group by word");
                    wordCountsDataset.show();
                }
            }
    );


    ssc.start();
    ssc.awaitTermination();

}
 
Example 12
Project: Sparkathon   File: Windowstream.java   Source Code and License Vote up 4 votes
public static void main(String[] args) throws Exception {

        final Pattern SPACE = Pattern.compile(" ");

        SparkConf conf = new SparkConf().setAppName("Big Apple").setMaster("local[2]");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));

        JavaDStream<String> lines = ssc.textFileStream("src/main/resources/stream");
        lines.print();

        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String x) {
                return Lists.newArrayList(SPACE.split(x)).iterator();
            }
        });

        JavaPairDStream<String, Integer> wordsDstream = words.mapToPair(
                new PairFunction<String, String, Integer>() {
                    @Override
                    public Tuple2<String, Integer> call(String s) {
                        return new Tuple2<String, Integer>(s, 1);
                    }
                });

        wordsDstream.print();

        Function2<Integer, Integer, Integer> reduceFunc = new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer i1, Integer i2) {
                return i1 + i2;
            }
        };

        JavaPairDStream<String, Integer> windowedWordCounts = wordsDstream.reduceByKeyAndWindow(reduceFunc, Durations.seconds(30), Durations.seconds(10));

        windowedWordCounts.print();


        ssc.start();
        ssc.awaitTermination();

    }
 
Example 13
Project: net.jgp.labs.spark   File: StreamingIngestionFileSystemTextFileToDataframeApp.java   Source Code and License Vote up 4 votes
private void start() {
	// Create a local StreamingContext with two working threads and a batch
	// interval of 5 seconds
	SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("Streaming Ingestion File System Text File to Dataframe");
	JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

	JavaDStream<String> msgDataStream = jssc.textFileStream(StreamingUtils.getInputDirectory());

	msgDataStream.print();
	// Create JavaRDD<Row>
	msgDataStream.foreachRDD(new VoidFunction<JavaRDD<String>>() {
		private static final long serialVersionUID = -590010339928376829L;

		@Override
		public void call(JavaRDD<String> rdd) {
			JavaRDD<Row> rowRDD = rdd.map(new Function<String, Row>() {
				private static final long serialVersionUID = 5167089361335095997L;

				@Override
				public Row call(String msg) {
					Row row = RowFactory.create(msg);
					return row;
				}
			});
			// Create Schema
			StructType schema = DataTypes.createStructType(
					new StructField[] { DataTypes.createStructField("Message", DataTypes.StringType, true) });
			
			// Get Spark 2.0 session
			SparkSession spark = JavaSparkSessionSingleton.getInstance(rdd.context().getConf());
			Dataset<Row> msgDataFrame = spark.createDataFrame(rowRDD, schema);
			msgDataFrame.show();
		}
	});

	jssc.start();
	try {
		jssc.awaitTermination();
	} catch (InterruptedException e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Example 14
Project: spark-streaming-example   File: SparkStreamingExample.java   Source Code and License Vote up 4 votes
public static void main(String s[]) {
	StreamNumberServer.startNumberGeneratorServer(9999);

	// Create a local StreamingContext with two working threads and a batch interval of 1 second
	SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("ConfigurableFilterApp");
	try (JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1))) {
		
		
		JavaReceiverInputDStream<String> lines = jssc.socketTextStream("localhost", 9999);
		
		JavaDStream<SensorData> values = lines.map(line -> SensorData.fromString(line));
		
		values = values.map(new CfgFunction());
		
		values.print();
		
		jssc.start();              // Start the computation
		jssc.awaitTermination();   // Wait for the computation to terminate
	} 
}
 
Example 15
Project: laughing-octo-sansa   File: KafkaSparkCassandraFlow.java   Source Code and License Vote up 4 votes
private void run(CompositeConfiguration conf) {
    // Kafka props
    String kafkaBrokers = conf.getString("metadata.broker.list");
    String topics = conf.getString("consumer.topic");
    String fromOffset = conf.getString("auto.offset.reset");

    // Spark props
    String sparkMaster = conf.getString("spark.master");
    long sparkStreamDuration = conf.getLong("stream.duration");

    // Cassandra props
    String cassandraKeyspace = "test";
    String cassandraTable = "kafka_logstream";
    String cassandraDbNode = conf.getString("cassandra.database.node");

    SparkConf sparkConf = new SparkConf().setAppName("Kafka Spark Cassandra Flow with Java API").setMaster(sparkMaster)
            .set("spark.cassandra.connection.host", cassandraDbNode);

    createDdl(sparkConf);

    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(sparkStreamDuration));

    HashSet<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
    HashMap<String, String> kafkaParams = new HashMap<>();
    kafkaParams.put("metadata.broker.list", kafkaBrokers);
    kafkaParams.put("auto.offset.reset", fromOffset);

    // Create direct kafka stream with brokers and topics
    JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc, String.class, String.class, StringDecoder.class,
            StringDecoder.class, kafkaParams, topicsSet);

    // Get the lines
    // JavaPairDStream<UUID, String> lines = messages.mapToPair(tuple2 -> new Tuple2<UUID, String>(UUID.randomUUID(), tuple2._2()));
    // JavaDStream<String> lines = messages.map(tuple2 -> UUID.randomUUID() + "\t" + tuple2._2());
    JavaDStream<KafkaRowWithUUID> lines = messages.map(tuple2 -> new KafkaRowWithUUID(UUID.randomUUID(), tuple2._2()));
    lines.print();

    javaFunctions(lines).writerBuilder(cassandraKeyspace, cassandraTable, mapToRow(KafkaRowWithUUID.class)).saveToCassandra();

    // Start the computation
    jssc.start();
    jssc.awaitTermination();
}
 
Example 16
Project: spork-streaming   File: LoadConverter.java   Source Code and License Vote up 4 votes
@Override
public JavaDStream<Tuple> convert(List<JavaDStream<Tuple>> predecessorRdds, POLoad poLoad) throws IOException {
	//        if (predecessors.size()!=0) {
	//            throw new RuntimeException("Should not have predecessors for Load. Got : "+predecessors);
	//        }
	configureLoader(physicalPlan, poLoad, sparkContext.ssc().sc().hadoopConfiguration(),this.pigContext);

	Iterator<PhysicalOperator> top = physicalPlan.iterator();
	boolean isTwitter = false;
	while(top.hasNext()){    		
		String load = top.next().toString();

		if(load.contains("hdfs://")){
			String[] splitted = load.split("hdfs://");       		 
			String url = "hdfs://" + splitted[1];

			if(url.contains("/_twitter")){
				isTwitter = true;        			
			}
			break;

		}

	}

	if(!isTwitter){    	   

		DStream<Tuple2<Text, Tuple>> hadoopRDD= sparkContext.ssc().fileStream(poLoad.getLFile().getFileName(), 
				SparkUtil.getManifest(Text.class), 
				SparkUtil.getManifest(Tuple.class), 
				SparkUtil.getManifest(PigInputFormat.class));

		//hadoopRDD.print();
		/*
		JavaDStream<String> mhadoopRDD = sparkContext.textFileStream(poLoad.getLFile().getFileName());
		
		stringTupleFunction tf = new stringTupleFunction();
		
		JavaDStream<Tuple> lulz = mhadoopRDD.map(tf);
		
		//lulz.print();			
		
		return lulz;
		*/
		
		JavaDStream<Tuple> hdfsTuple = new JavaDStream<Tuple>(hadoopRDD.map(TO_VALUE_FUNCTION,SparkUtil.getManifest(Tuple.class)),SparkUtil.getManifest(Tuple.class));

		hdfsTuple.print();
		
		return hdfsTuple;
		
	}else{

		System.out.println("=====Tweeets-Tweets=======");
		System.setProperty("twitter4j.oauth.consumerKey","mGkece93BmDILkPXXXXX");
		System.setProperty("twitter4j.oauth.consumerSecret","K9RhnuOdZJlxDgxKJXXXXXXXXXXXXXXXXXXXXX");
		System.setProperty("twitter4j.oauth.accessToken","2493987XXXXXXXXXXXXXXXXXXXXXXXXXFPRs0Ho7");
		System.setProperty("twitter4j.oauth.accessTokenSecret","XXXXXXXXXXXXXXXXXXXXikQ0KxfqByVrtzs3jYP");
		//sparkContext.checkpoint("/home/akhld/mobi/temp/pig/twitter/");

		//JavaDStream<Status> dtweets= sparkContext.twitterStream();
		JavaDStream<Status> dtweets = TwitterUtils.createStream(sparkContext);

		System.out.println("=====Tweeets-Tweets=======");

		tweetFunction fnc = new tweetFunction();
		DStream<Tuple> dstatuses = dtweets.dstream().map(fnc,SparkUtil.getManifest(Tuple.class));				

		dstatuses.print();

		JavaDStream<Tuple> tweetTuple = new JavaDStream<Tuple>(dstatuses, SparkUtil.getManifest(Tuple.class));

		return tweetTuple;

	}

}
 
Example 17
Project: Sparkathon   File: Basestream.java   Source Code and License Vote up 3 votes
public static void main(String[] args) throws Exception {

        SparkConf conf = new SparkConf().setAppName("Big Apple").setMaster("local[2]");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));

        JavaDStream<String> stringJavaDStream = ssc.textFileStream("src/main/resources/stream");
        stringJavaDStream.print();

        ssc.start();
        ssc.awaitTermination();

    }