org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units Java Examples

The following examples show how to use org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source file: SourceHandlerTest.java — from the metron project (Apache License 2.0).
@Test
public void testRotateOutputFile() throws IOException {
  // The rotation size is irrelevant for this test; the policy only has to exist.
  FileSizeRotationPolicy rotationPolicy = new FileSizeRotationPolicy(10000, Units.MB);
  SourceHandler sourceHandler =
      new SourceHandler(rotActions, rotationPolicy, new CountSyncPolicy(1), testFormat, callback);

  sourceHandler.rotateOutputFile();

  // Rotating must trigger every registered rotation action and notify the callback.
  verify(rotAction1).execute(any(), any());
  verify(rotAction2).execute(any(), any());
  verify(callback).removeKey();
}
 
Example #2
Source file: MovingAvgLocalTopologyRunner.java — from the hadoop-arch-book project (Apache License 2.0).
/**
 * Builds the HdfsBolt that persists stock ticks to HDFS.
 *
 * @return a configured {@link HdfsBolt} writing pipe-delimited records
 */
private static HdfsBolt createHdfsBolt() {
  // Pipe-delimited records instead of the default comma.
  RecordFormat recordFormat = new DelimitedRecordFormat().withFieldDelimiter("|");

  // Flush to the filesystem after every 100 tuples.
  SyncPolicy syncPolicy = new CountSyncPolicy(100);

  // Roll over to a new file once 5MB has been written.
  FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

  // Records land under the <user>/stock-ticks/ directory in HDFS.
  FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("stock-ticks/");

  return new HdfsBolt()
    .withFsUrl("hdfs://localhost:8020")
    .withFileNameFormat(fileNameFormat)
    .withRecordFormat(recordFormat)
    .withRotationPolicy(rotationPolicy)
    .withSyncPolicy(syncPolicy);
}
 
Example #3
Source file: WARCHdfsBolt.java — from the storm-crawler project (Apache License 2.0).
public WARCHdfsBolt() {
    super();
    // Roll over to a new WARC file once the current one reaches 1GB.
    withRotationPolicy(new FileSizeRotationPolicy(1.0f, Units.GB));
    // Dummy sync policy: flush every 10 tuples.
    withSyncPolicy(new CountSyncPolicy(10));
    // Default to the local filesystem unless reconfigured by the caller.
    withFsUrl("file:///");
}
 
Example #4
Source file: TopologyRunner.java — from the opensoc-streaming project (Apache License 2.0).
/**
 * Wires an HdfsBolt into the topology, reading its delimiter, batch size,
 * rotation size, paths, compression codec, and parallelism from {@code config}.
 * On any initialization failure the process is terminated.
 *
 * @param topology_name name of the topology (currently unused in this method)
 * @param name          component name to register the bolt under
 * @return {@code true} when the bolt was wired successfully (failure exits the JVM)
 */
private boolean initializeHDFSBolt(String topology_name, String name) {
	try {

		String messageUpstreamComponent = messageComponents
				.get(messageComponents.size() - 1);

		System.out.println("[OpenSOC] ------" + name
				+ " is initializing from " + messageUpstreamComponent);

		// NOTE: config.getString(...) already returns a String, so the
		// previous .toString() calls were redundant and have been removed.
		RecordFormat format = new DelimitedRecordFormat()
				.withFieldDelimiter(
						config.getString("bolt.hdfs.field.delimiter"))
				.withFields(new Fields("message"));

		// sync the file system after every x number of tuples
		SyncPolicy syncPolicy = new CountSyncPolicy(Integer.valueOf(
				config.getString("bolt.hdfs.batch.size")));

		// rotate files when they reach certain size
		FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(
				Float.valueOf(config.getString(
						"bolt.hdfs.file.rotation.size.in.mb")),
				Units.MB);

		FileNameFormat fileNameFormat = new DefaultFileNameFormat()
				.withPath(config.getString("bolt.hdfs.wip.file.path"));

		// Post rotate action: move completed files out of the WIP directory.
		MoveFileAction moveFileAction = new MoveFileAction()
				.toDestination(config.getString("bolt.hdfs.finished.file.path"));

		HdfsBolt hdfsBolt = new HdfsBolt()
				.withFsUrl(config.getString("bolt.hdfs.file.system.url"))
				.withFileNameFormat(fileNameFormat)
				.withRecordFormat(format)
				.withRotationPolicy(rotationPolicy)
				.withSyncPolicy(syncPolicy)
				.addRotationAction(moveFileAction);
		// Compression is optional; only apply it when a codec is configured.
		if (config.getString("bolt.hdfs.compression.codec.class") != null) {
			hdfsBolt.withCompressionCodec(
					config.getString("bolt.hdfs.compression.codec.class"));
		}

		builder.setBolt(name, hdfsBolt,
				config.getInt("bolt.hdfs.parallelism.hint"))
				.shuffleGrouping(messageUpstreamComponent, "message")
				.setNumTasks(config.getInt("bolt.hdfs.num.tasks"));

	} catch (Exception e) {
		e.printStackTrace();
		// BUG FIX: was System.exit(0), which reports success to the OS even
		// though initialization failed. Exit with a non-zero status instead.
		System.exit(1);
	}

	return true;
}
 
Example #5
Source file: SequenceFileTopology.java — from the storm-hdfs project (Apache License 2.0).
/**
 * Builds and submits the sequence-file topology. With one argument (the HDFS
 * URL) it runs on a local cluster for two minutes; with two arguments the
 * second is the topology name for remote submission.
 */
public static void main(String[] args) throws Exception {
    Config stormConfig = new Config();
    stormConfig.setNumWorkers(1);

    // Flush to the filesystem after every 1000 tuples.
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // Start a new file once the current one reaches 5MB.
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/source/")
            .withExtension(".seq");

    // Sequence records keyed by timestamp with the sentence as the value.
    DefaultSequenceFormat sequenceFormat =
            new DefaultSequenceFormat("timestamp", "sentence");

    SequenceFileBolt bolt = new SequenceFileBolt()
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withSequenceFormat(sequenceFormat)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .withCompressionType(SequenceFile.CompressionType.RECORD)
            .withCompressionCodec("deflate")
            .addRotationAction(new MoveFileAction().toDestination("/dest/"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, new SentenceSpout(), 1);
    // SentenceSpout --> SequenceFileBolt
    builder.setBolt(BOLT_ID, bolt, 4)
            .shuffleGrouping(SENTENCE_SPOUT_ID);

    if (args.length == 1) {
        // Single argument: run locally for two minutes, then tear down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, stormConfig, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], stormConfig, builder.createTopology());
    }
}