org.apache.storm.hdfs.bolt.format.FileNameFormat Java Examples

The following examples show how to use org.apache.storm.hdfs.bolt.format.FileNameFormat. They are drawn from several open-source projects; the source file and project for each example are noted above its code.
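For orientation: FileNameFormat is the hook that HdfsBolt and related writers use to decide both the directory a file goes into (getPath) and the name of each rotated file (getName). The sketch below is a minimal custom implementation, not taken from any of the projects above; it assumes the storm-hdfs interface with prepare/getName/getPath methods, and the exact generics of the configuration map vary between Storm versions.

import java.util.Map;

import org.apache.storm.hdfs.bolt.format.FileNameFormat;
import org.apache.storm.task.TopologyContext;

/**
 * Minimal sketch of a custom FileNameFormat. It writes files named
 * "events-<taskId>-<rotation>-<timestamp>.txt" under a fixed path.
 * Assumes the storm-hdfs interface (prepare/getName/getPath); the prepare()
 * signature differs slightly between Storm versions.
 */
public class SimpleFileNameFormat implements FileNameFormat {

  private final String path = "/tmp/storm-out";
  private int taskId;

  @Override
  public void prepare(Map conf, TopologyContext topologyContext) {
    // Capture the task id so files written by different tasks never collide.
    this.taskId = topologyContext.getThisTaskId();
  }

  @Override
  public String getName(long rotation, long timeStamp) {
    return "events-" + taskId + "-" + rotation + "-" + timeStamp + ".txt";
  }

  @Override
  public String getPath() {
    return path;
  }
}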
Example #1
Source File: MovingAvgLocalTopologyRunner.java    From hadoop-arch-book with Apache License 2.0
/**
 * Create bolt which will persist ticks to HDFS.
 */
private static HdfsBolt createHdfsBolt() {

  // Use "|" instead of "," for field delimiter:
  RecordFormat format = new DelimitedRecordFormat()
    .withFieldDelimiter("|");
  // Sync the filesystem after every 100 tuples:
  SyncPolicy syncPolicy = new CountSyncPolicy(100);

  // Rotate files when they reach 5MB:
  FileRotationPolicy rotationPolicy = 
    new FileSizeRotationPolicy(5.0f, Units.MB);

  // Write records to <user>/stock-ticks/ directory in HDFS:
  FileNameFormat fileNameFormat = new DefaultFileNameFormat()
    .withPath("stock-ticks/");

  HdfsBolt hdfsBolt = new HdfsBolt()
    .withFsUrl("hdfs://localhost:8020")
    .withFileNameFormat(fileNameFormat)
    .withRecordFormat(format)
    .withRotationPolicy(rotationPolicy)
    .withSyncPolicy(syncPolicy);

  return hdfsBolt;
}
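
As a follow-up to Example #1: DefaultFileNameFormat also lets you set the prefix and extension explicitly (it typically falls back to an empty prefix and a ".txt" extension otherwise). A small, illustrative variant of the format above, with made-up prefix and extension values:

// Illustrative variant of the fileNameFormat above; "ticks-" and ".csv" are
// hypothetical values. With DefaultFileNameFormat the resulting files are
// typically named <prefix><componentId>-<taskId>-<rotationNum>-<timestamp><extension>.
FileNameFormat explicitFormat = new DefaultFileNameFormat()
  .withPath("stock-ticks/")
  .withPrefix("ticks-")
  .withExtension(".csv");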
 
Example #2
Source File: SourceHandler.java    From metron with Apache License 2.0
public SourceHandler(List<RotationAction> rotationActions
                    , FileRotationPolicy rotationPolicy
                    , SyncPolicy syncPolicy
                    , FileNameFormat fileNameFormat
                    , SourceHandlerCallback cleanupCallback) throws IOException {
  this.rotationActions = rotationActions;
  this.rotationPolicy = rotationPolicy;
  this.syncPolicy = syncPolicy;
  this.fileNameFormat = fileNameFormat;
  this.cleanupCallback = cleanupCallback;
  initialize();
}
 
Example #3
Source File: HdfsWriterTest.java    From metron with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testWriteSingleFileWithNull() throws Exception {
  String function = "FORMAT('test-%s/%s', test.key, test.key)";
  WriterConfiguration config = buildWriterConfiguration(function);
  FileNameFormat format = new DefaultFileNameFormat()
          .withPath(folder.toString())
          .withExtension(".json")
          .withPrefix("prefix-");
  HdfsWriter writer = new HdfsWriter().withFileNameFormat(format);
  writer.init(new HashMap<String, String>(), config);
  writer.initFileNameFormat(createTopologyContext());

  // This message has no test.key, so both FORMAT arguments resolve to null
  // and the output lands in the test-null/null/ folder
  JSONObject message = new JSONObject();
  message.put("test.key2", "test.value2");
  List<BulkMessage<JSONObject>> messages = new ArrayList<BulkMessage<JSONObject>>() {{
    add(new BulkMessage<>("message1", message));
  }};

  writer.write(SENSOR_NAME, config, messages);
  writer.close();

  ArrayList<String> expected = new ArrayList<>();
  expected.add(message.toJSONString());
  Collections.sort(expected);

  File outputFolder = new File(folder.getAbsolutePath() + "/test-null/null/");
  assertTrue(outputFolder.exists() && outputFolder.isDirectory());
  assertEquals(1, outputFolder.listFiles().length);

  for(File file : outputFolder.listFiles()) {
    List<String> lines = Files.readAllLines(file.toPath());
    Collections.sort(lines);
    assertEquals(expected, lines);
  }
}
 
Example #4
Source File: PathExtensionFileNameFormatTest.java    From metron with Apache License 2.0
@Test
public void testGetPath() {
  FileNameFormat delegate = new DefaultFileNameFormat().withExtension(EXTENSION).withPath(PATH);
  FileNameFormat sourceFormat = new PathExtensionFileNameFormat(PATH_EXTENSION, delegate);
  String actual = sourceFormat.getPath();
  String expected = PATH + "/" + PATH_EXTENSION;
  assertEquals(expected, actual);
}
 
Example #5
Source File: PathExtensionFileNameFormatTest.java    From metron with Apache License 2.0
@Test
public void testGetPathEmptyPathExtension() {
  FileNameFormat delegate = new DefaultFileNameFormat().withExtension(EXTENSION).withPath(PATH);
  FileNameFormat sourceFormat = new PathExtensionFileNameFormat("", delegate);
  String actual = sourceFormat.getPath();
  assertEquals(PATH + "/", actual);
}
 
Example #6
Source File: GzipHdfsBolt.java    From storm-crawler with Apache License 2.0
public GzipHdfsBolt withFileNameFormat(FileNameFormat fileNameFormat) {
    this.fileNameFormat = fileNameFormat;
    return this;
}
 
Example #7
Source File: HdfsTopology.java    From storm-kafka-examples with Apache License 2.0
public static void main(String[] args) {
    try{
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // ZooKeeper ensemble used by Kafka
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);  // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");

        // sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);

        // rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");

        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt,boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if(args!=null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());

            Thread.sleep(500000);

            cluster.shutdown();
        }
    }catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example #8
Source File: WARCFileNameFormat.java    From storm-crawler with Apache License 2.0
public FileNameFormat withPath(String path) {
    this.path = path;
    return this;
}
 
Example #9
Source File: HdfsFileTopology.java    From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);

    SentenceSpout spout = new SentenceSpout();

    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // rotate files every minute
    FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/foo/")
            .withExtension(".txt");



    // use "|" instead of "," for field delimiter
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");

    Yaml yaml = new Yaml();
    InputStream in = new FileInputStream(args[1]);
    Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
    in.close();
    config.put("hdfs.config", yamlConf);

    HdfsBolt bolt = new HdfsBolt()
            .withConfigKey("hdfs.config")
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .addRotationAction(new MoveFileAction().toDestination("/dest2/"));

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> MyBolt
    builder.setBolt(BOLT_ID, bolt, 4)
            .shuffleGrouping(SENTENCE_SPOUT_ID);

    if (args.length == 2) {
        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 3) {
        // args[2] is the topology name; args[0] is still the HDFS URL used above
        StormSubmitter.submitTopology(args[2], config, builder.createTopology());
    } else {
        System.out.println("Usage: HdfsFileTopology <hdfs url> <yaml config file> [topology name]");
    }
}
 
Example #10
Source File: SequenceFileBolt.java    From storm-hdfs with Apache License 2.0
public SequenceFileBolt withFileNameFormat(FileNameFormat fileNameFormat) {
    this.fileNameFormat = fileNameFormat;
    return this;
}
 
Example #11
Source File: HdfsBolt.java    From storm-hdfs with Apache License 2.0
public HdfsBolt withFileNameFormat(FileNameFormat fileNameFormat){
    this.fileNameFormat = fileNameFormat;
    return this;
}
 
Example #12
Source File: TopologyRunner.java    From opensoc-streaming with Apache License 2.0
private boolean initializeHDFSBolt(String topology_name, String name) {
	try {

		String messageUpstreamComponent = messageComponents
				.get(messageComponents.size() - 1);

		System.out.println("[OpenSOC] ------" + name
				+ " is initializing from " + messageUpstreamComponent);

		RecordFormat format = new DelimitedRecordFormat()
				.withFieldDelimiter(
						config.getString("bolt.hdfs.field.delimiter")
								.toString()).withFields(
						new Fields("message"));

		// sync the filesystem after every configured batch of tuples
		SyncPolicy syncPolicy = new CountSyncPolicy(Integer.valueOf(config
				.getString("bolt.hdfs.batch.size").toString()));

		// rotate files when they reach a certain size
		FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(
				Float.valueOf(config.getString(
						"bolt.hdfs.file.rotation.size.in.mb").toString()),
				Units.MB);

		FileNameFormat fileNameFormat = new DefaultFileNameFormat()
				.withPath(config.getString("bolt.hdfs.wip.file.path")
						.toString());

		// Post rotate action
		MoveFileAction moveFileAction = (new MoveFileAction())
				.toDestination(config.getString(
						"bolt.hdfs.finished.file.path").toString());

		HdfsBolt hdfsBolt = new HdfsBolt()
				.withFsUrl(
						config.getString("bolt.hdfs.file.system.url")
								.toString())
				.withFileNameFormat(fileNameFormat)
				.withRecordFormat(format)
				.withRotationPolicy(rotationPolicy)
				.withSyncPolicy(syncPolicy)
				.addRotationAction(moveFileAction);
		if (config.getString("bolt.hdfs.compression.codec.class") != null) {
			hdfsBolt.withCompressionCodec(config.getString(
					"bolt.hdfs.compression.codec.class").toString());
		}

		builder.setBolt(name, hdfsBolt,
				config.getInt("bolt.hdfs.parallelism.hint"))
				.shuffleGrouping(messageUpstreamComponent, "message")
				.setNumTasks(config.getInt("bolt.hdfs.num.tasks"));

	} catch (Exception e) {
		e.printStackTrace();
		System.exit(0);
	}

	return true;
}
 
Example #13
Source File: HdfsWriterTest.java    From metron with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testWriteSingleFile() throws Exception {
  String function = "FORMAT('test-%s/%s', test.key, test.key)";
  WriterConfiguration config = buildWriterConfiguration(function);
  FileNameFormat format = new DefaultFileNameFormat()
          .withPath(folder.toString())
          .withExtension(".json")
          .withPrefix("prefix-");
  HdfsWriter writer = new HdfsWriter().withFileNameFormat(format);
  writer.init(new HashMap<String, String>(), config);
  writer.initFileNameFormat(createTopologyContext());

  // These two messages will be routed to the same folder, because test.key is the same
  JSONObject message = new JSONObject();
  message.put("test.key", "test.value");
  message.put("test.key2", "test.value2");
  JSONObject message2 = new JSONObject();
  message2.put("test.key", "test.value");
  message2.put("test.key3", "test.value2");
  List<BulkMessage<JSONObject>> messages = new ArrayList<BulkMessage<JSONObject>>() {{
    add(new BulkMessage<>("message1", message));
    add(new BulkMessage<>("message2", message2));
  }};

  writer.write(SENSOR_NAME, config, messages);
  writer.close();

  ArrayList<String> expected = new ArrayList<>();
  expected.add(message.toJSONString());
  expected.add(message2.toJSONString());
  Collections.sort(expected);

  File outputFolder = new File(folder.getAbsolutePath() + "/test-test.value/test.value/");
  assertTrue(outputFolder.exists() && outputFolder.isDirectory());
  assertEquals(1, outputFolder.listFiles().length);

  for(File file : outputFolder.listFiles()) {
    List<String> lines = Files.readAllLines(file.toPath());
    Collections.sort(lines);
    assertEquals(expected, lines);
  }
}
 
Example #14
Source File: HdfsWriterTest.java    From metron with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testWriteNoOutputFunction() throws Exception {
  FileNameFormat format = new DefaultFileNameFormat()
          .withPath(folder.toString())
          .withExtension(".json")
          .withPrefix("prefix-");
  HdfsWriter writer = new HdfsWriter().withFileNameFormat(format);
  IndexingConfigurations indexingConfig = new IndexingConfigurations();
  WriterConfiguration config = new IndexingWriterConfiguration(WRITER_NAME, indexingConfig);
  writer.init(new HashMap<String, String>(), config);
  writer.initFileNameFormat(createTopologyContext());

  JSONObject message = new JSONObject();
  message.put("test.key", "test.value");
  message.put("test.key2", "test.value2");
  JSONObject message2 = new JSONObject();
  message2.put("test.key", "test.value3");
  message2.put("test.key2", "test.value2");
  List<BulkMessage<JSONObject>> messages = new ArrayList<BulkMessage<JSONObject>>() {{
    add(new BulkMessage("message1", message));
    add(new BulkMessage("message2", message2));
  }};

  writer.write(SENSOR_NAME, config, messages);
  writer.close();

  ArrayList<String> expected = new ArrayList<>();
  expected.add(message.toJSONString());
  expected.add(message2.toJSONString());
  Collections.sort(expected);

  // Default to just putting it in the base folder + the sensor name
  File outputFolder = new File(folder.getAbsolutePath() + "/" + SENSOR_NAME);
  assertTrue(outputFolder.exists() && outputFolder.isDirectory());
  assertEquals(1, outputFolder.listFiles().length);

  for(File file : outputFolder.listFiles()) {
    List<String> lines = Files.readAllLines(file.toPath());
    Collections.sort(lines);
    assertEquals(expected, lines);
  }
}
 
Example #15
Source File: HdfsWriter.java    From metron with Apache License 2.0
public HdfsWriter withFileNameFormat(FileNameFormat fileNameFormat){
  this.fileNameFormat = fileNameFormat;
  return this;
}
 
Example #16
Source File: PathExtensionFileNameFormat.java    From metron with Apache License 2.0
public PathExtensionFileNameFormat(String pathExtension, FileNameFormat delegate) {
  this.delegate = delegate;
  this.pathExtension = pathExtension;
}
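
The tests in Examples #4 and #5 pin down what the rest of this class must do: getPath() appends the path extension to the delegate's path, and file naming is left to the delegate. A minimal sketch consistent with those tests (not necessarily Metron's exact source) looks like this:

// Sketch of the remaining methods, inferred from the tests in Examples #4 and #5;
// Metron's actual implementation may differ in detail (prepare() would likewise
// simply delegate to the wrapped format).
@Override
public String getPath() {
  return this.delegate.getPath() + "/" + this.pathExtension;
}

@Override
public String getName(long rotation, long timeStamp) {
  return this.delegate.getName(rotation, timeStamp);
}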
 
Example #17
Source File: WARCFileNameFormat.java    From storm-crawler with Apache License 2.0
/**
 * Overrides the default prefix.
 *
 * @param prefix the prefix to use for generated WARC file names
 * @return this FileNameFormat, for method chaining
 */
public FileNameFormat withPrefix(String prefix) {
    this.prefix = prefix;
    return this;
}
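
Putting Examples #6, #8 and #17 together, a storm-crawler WARC writer would be wired up roughly as in the sketch below. This is a hedged sketch: it assumes GzipHdfsBolt inherits the usual HdfsBolt builder methods (withFsUrl, withRotationPolicy, withSyncPolicy), and the URL, path, prefix and policy values are illustrative.

// Hedged sketch combining Examples #6, #8 and #17; values are illustrative and
// GzipHdfsBolt is assumed to inherit HdfsBolt's builder methods.
WARCFileNameFormat warcNames = new WARCFileNameFormat();
warcNames.withPath("/warc/");
warcNames.withPrefix("crawl");

GzipHdfsBolt warcBolt = new GzipHdfsBolt();
warcBolt.withFsUrl("hdfs://localhost:8020");
warcBolt.withFileNameFormat(warcNames);
warcBolt.withRotationPolicy(
    new FileSizeRotationPolicy(1024.0f, FileSizeRotationPolicy.Units.MB));
warcBolt.withSyncPolicy(new CountSyncPolicy(10));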