Java Code Examples for org.apache.flume.Context

The following examples show how to use org.apache.flume.Context. These examples are extracted from open source projects.
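A Context is essentially a string-to-string property map with typed getters, and every example below revolves around that small API. As a quick orientation, here is a minimal sketch using only methods that appear in the extracted code (put, getString, getInteger, getBoolean); the key names are invented for illustration.

import org.apache.flume.Context;

public class ContextSketch {
    public static void main(String[] args) {
        Context context = new Context();

        // Values are always stored as strings...
        context.put("bind", "localhost");
        context.put("port", "4141");

        // ...and read back through typed getters, optionally with a default
        // that applies when the key is unset.
        String bind = context.getString("bind");                    // "localhost"
        Integer port = context.getInteger("port");                  // 4141
        Boolean ack = context.getBoolean("ack-every-event", true);  // true (default)

        System.out.println(bind + ":" + port + " ack=" + ack);
    }
}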
Example 1
Project: sequenceiq-samples    File: CustomLastfmHeaderAndBodyTextEventSerializerTest.java    License: Apache License 2.0
@Test
public void testNoNewline() throws FileNotFoundException, IOException {

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("header1", "value1");

    OutputStream out = new FileOutputStream(testFile);
    Context context = new Context();
    context.put("appendNewline", "false");
    CustomLastfmHeaderAndBodyTextEventSerializer.Builder builder = CustomLastfmHeaderAndBodyTextEventSerializer.builder();
    EventSerializer serializer = builder.build(context, out); // pass the configured context so appendNewline=false takes effect
    serializer.afterCreate();
    serializer.write(EventBuilder.withBody("event 1", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 2", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 3", Charsets.UTF_8, headers));
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();

    BufferedReader reader = new BufferedReader(new FileReader(testFile));
    Assert.assertNull(reader.readLine());
    reader.close();

    FileUtils.forceDelete(testFile);
}
 
Example 2
Project: phoenix    File: PhoenixSinkIT.java    License: Apache License 2.0
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    } catch (Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example 3
Project: flume-elasticsearch-sink    File: TestAvroSerializer.java    License: Apache License 2.0
/**
 * tests Avro Serializer
 */
@Test
public void testSerializer() throws Exception {
    Context context = new Context();
    String schemaFile = getClass().getResource("/schema.avsc").getFile();
    context.put(ES_AVRO_SCHEMA_FILE, schemaFile);
    avroSerializer.configure(context);
    Schema schema = new Schema.Parser().parse(new File(schemaFile));
    GenericRecord user = generateGenericRecord(schema);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    Encoder encoder = new EncoderFactory().binaryEncoder(outputStream, null);
    datumWriter.write(user, encoder);
    encoder.flush();
    Event event = EventBuilder.withBody(outputStream.toByteArray());
    XContentBuilder expected = generateContentBuilder();
    XContentBuilder actual = avroSerializer.serialize(event);
    JsonParser parser = new JsonParser();
    assertEquals(parser.parse(Strings.toString(expected)), parser.parse(Strings.toString(actual)));
}
 
Example 4
Project: datacollector    File: TestFlumeThriftTarget.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  port = NetworkUtils.getRandomPort();
  source = new ThriftSource();
  ch = new MemoryChannel();
  Configurables.configure(ch, new Context());

  Context context = new Context();
  context.put("port", String.valueOf(port));
  context.put("bind", "localhost");
  Configurables.configure(source, context);

  List<Channel> channels = new ArrayList<>();
  channels.add(ch);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
  source.start();
}
 
Example 5
Project: mt-flume    File: TestNetcatSource.java    License: Apache License 2.0
@Before
public void setUp() {
  logger.info("Running setup");

  channel = new MemoryChannel();
  source = new NetcatSource();

  Context context = new Context();

  Configurables.configure(channel, context);
  List<Channel> channels = Lists.newArrayList(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));
}
 
Example 6
Project: mt-flume    File: TestHostInterceptor.java    License: Apache License 2.0
/**
 * Ensure host is NOT overwritten when preserveExisting=true.
 */
@Test
public void testPreserve() throws Exception {
  Context ctx = new Context();
  ctx.put("preserveExisting", "true");

  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
          InterceptorType.HOST.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  final String ORIGINAL_HOST = "originalhost";
  Event eventBeforeIntercept = EventBuilder.withBody("test event",
          Charsets.UTF_8);
  eventBeforeIntercept.getHeaders().put(Constants.HOST, ORIGINAL_HOST);
  Assert.assertEquals(ORIGINAL_HOST,
          eventBeforeIntercept.getHeaders().get(Constants.HOST));

  String expectedHost = ORIGINAL_HOST;
  Event eventAfterIntercept = interceptor.intercept(eventBeforeIntercept);
  String actualHost = eventAfterIntercept.getHeaders().get(Constants.HOST);

  Assert.assertNotNull(actualHost);
  Assert.assertEquals(expectedHost, actualHost);
}
 
Example 7
Project: fiware-cygnus    File: NGSICartoDBSinkTest.java    License: GNU Affero General Public License v3.0
/**
 * [NGSICartoDBSink.configure] -------- Only the enable_distance configuration works. This test must be
 * removed once the enable_raw parameter is removed (currently it is just deprecated).
 * @throws java.lang.Exception
 */
@Test
public void testConfigureEnableDistanceOnly() throws Exception {
    System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
            + "-------- Only enable_distance configuration works");
    // Create a NGSICartoDBSink
    Context context = new Context();
    context.put("enable_distance", "true");
    context.put("keys_conf_file", ""); // any value except for null
    NGSICartoDBSink sink = new NGSICartoDBSink();
    sink.configure(context);
    
    try {
        assertTrue(sink.getEnableDistanceHistoric());
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "-  OK  - Only 'enable_distance' was configured and worked");
    } catch (AssertionError e) {
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "- FAIL - Only 'enable_distance' was configured and did not work");
        throw e;
    } // try catch
}
 
Example 8
Project: flume-ng-extends-source    File: KafkaSource.java    License: MIT License
/**
 * We configure the source and generate properties for the Kafka consumer.
 *
 * Kafka consumer properties are generated as follows:
 *
 * 1. Generate a properties object with some static defaults that can be
 *    overridden by the source configuration.
 * 2. Add the configuration the user supplied for Kafka (parameters starting
 *    with "kafka.", which must be valid Kafka consumer properties).
 * 3. Add the source's documented parameters, which can override the other
 *    properties.
 *
 * @param context the Flume context holding the source configuration
 */
public void configure(Context context) {
  this.context = context;
  batchUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_SIZE,
          KafkaSourceConstants.DEFAULT_BATCH_SIZE);
  timeUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_DURATION_MS,
          KafkaSourceConstants.DEFAULT_BATCH_DURATION);
  topic = context.getString(KafkaSourceConstants.TOPIC);

  if(topic == null) {
    throw new ConfigurationException("Kafka topic must be specified.");
  }

  kafkaProps = KafkaSourceUtil.getKafkaProperties(context);
  consumerTimeout = Integer.parseInt(kafkaProps.getProperty(
          KafkaSourceConstants.CONSUMER_TIMEOUT));
  kafkaAutoCommitEnabled = Boolean.parseBoolean(kafkaProps.getProperty(
          KafkaSourceConstants.AUTO_COMMIT_ENABLED));

  if (counter == null) {
    counter = new KafkaSourceCounter(getName());
  }
}
 
Example 9
Project: phoenix    File: TestPhoenixSink.java    License: BSD 3-Clause "New" or "Revised" License
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    } catch (Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example 10
Project: fiware-cygnus    File: NGSIPostgreSQLSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String attrPersistence, String batchSize, String batchTime, String batchTTL,
                                            String dataModel, String enableEncoding, String enableGrouping, String enableLowercase, String host,
                                            String password, String port, String username, String cache, String attrNativeTypes) {
    Context context = new Context();
    context.put("attr_persistence", attrPersistence);
    context.put("batch_size", batchSize);
    context.put("batch_time", batchTime);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("postgresql_host", host);
    context.put("postgresql_password", password);
    context.put("postgresql_port", port);
    context.put("postgresql_username", username);
    context.put("backend.enable_cache", cache);
    context.put("attr_native_types", attrNativeTypes);
    return context;
}
 
Example 11
Project: fiware-cygnus    File: NGSICartoDBSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContext(String apiKey, String backendMaxConns, String backendMaxConnsPerRoute,
        String batchSize, String batchTimeout, String batchTTL, String dataModel, String enableDistanceHistoric,
        String enableGrouping, String enableLowercase, String enableRawHistoric, String enableRawSnapshot,
        String swapCoordinates, String keysConfFile) {
    Context context = new Context();
    context.put("api_key", apiKey);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batch_size", batchSize);
    context.put("batch_timeout", batchTimeout);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_distance_historic", enableDistanceHistoric);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("enable_raw_historic", enableRawHistoric);
    context.put("enable_raw_snapshot", enableRawSnapshot);
    context.put("swap_coordinates", swapCoordinates);
    context.put("keys_conf_file", keysConfFile);
    return context;
}
 
Example 12
Project: flume-plugins    File: JavaLogAvroEventSerializer.java    License: MIT License
@Override
public EventSerializer build(Context context, OutputStream out) {
    JavaLogAvroEventSerializer writer = null;
    try {
        writer = new JavaLogAvroEventSerializer(out);
        writer.configure(context);
    } catch (IOException e) {
        log.error("Unable to parse schema file. Exception follows.", e);
    }
    return writer;
}
 
Example 13
Project: mt-flume    File: HDFSCompressedDataStream.java    License: Apache License 2.0
@Override
public void configure(Context context) {
  super.configure(context);

  serializerType = context.getString("serializer", "TEXT");
  useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
      false);
  serializerContext = new Context(
      context.getSubProperties(EventSerializer.CTX_PREFIX));
  logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = "
      + useRawLocalFileSystem);
}
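Note how the serializer's settings are carved out of the parent configuration here: getSubProperties returns every property under the given prefix with the prefix stripped (Example 21 below uses the same pattern). A minimal sketch of that behavior, with invented key names and assuming EventSerializer.CTX_PREFIX is the "serializer." prefix:

Context parent = new Context();
parent.put("serializer", "TEXT");
parent.put("serializer.appendNewline", "false");

// The sub-context now holds {appendNewline=false}: the prefix is stripped,
// and the bare "serializer" key itself is not included.
Context serializerContext = new Context(parent.getSubProperties("serializer."));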
 
Example 14
Project: mt-flume    File: TestUUIDInterceptor.java    License: Apache License 2.0
@Test
public void testPrefix() throws Exception {
  Context context = new Context();
  context.put(UUIDInterceptor.HEADER_NAME, ID);
  context.put(UUIDInterceptor.PREFIX_NAME, "bar#");
  Event event = new SimpleEvent();
  assertTrue(build(context).intercept(event).getHeaders().get(ID).startsWith("bar#"));
}
 
Example 15
Project: mt-flume    File: TestElasticSearchSink.java    License: Apache License 2.0
@Test
public void shouldParseMultipleHostUsingDefaultPorts() {
  parameters.put(HOSTNAMES, "10.5.5.27,10.5.5.28,10.5.5.29");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  InetSocketTransportAddress[] expected = {
      new InetSocketTransportAddress("10.5.5.27", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.28", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.29", DEFAULT_PORT) };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example 16
Project: mt-flume    File: TestSpoolDirectorySource.java    License: Apache License 2.0
@Test
public void testPutFilenameHeader() throws IOException, InterruptedException {
  Context context = new Context();
  File f1 = new File(tmpDir.getAbsolutePath() + "/file1");

  Files.write("file1line1\nfile1line2\nfile1line3\nfile1line4\n" +
              "file1line5\nfile1line6\nfile1line7\nfile1line8\n",
              f1, Charsets.UTF_8);

  context.put(SpoolDirectorySourceConfigurationConstants.SPOOL_DIRECTORY,
      tmpDir.getAbsolutePath());
  context.put(SpoolDirectorySourceConfigurationConstants.FILENAME_HEADER,
      "true");
  context.put(SpoolDirectorySourceConfigurationConstants.FILENAME_HEADER_KEY,
      "fileHeaderKeyTest");

  Configurables.configure(source, context);
  source.start();
  Thread.sleep(500);
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event e = channel.take();
  Assert.assertNotNull("Event must not be null", e);
  Assert.assertNotNull("Event headers must not be null", e.getHeaders());
  Assert.assertNotNull(e.getHeaders().get("fileHeaderKeyTest"));
  Assert.assertEquals(f1.getAbsolutePath(),
      e.getHeaders().get("fileHeaderKeyTest"));
  txn.commit();
  txn.close();
}
 
Example 17
Project: fiware-cygnus    File: NGSICKANSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String backendImpl, String backendMaxConns, String backendMaxConnsPerRoute,
                                            String batchSize, String batchTime, String batchTTL, String csvSeparator, String dataModel,
                                            String enableEncoding, String enableGrouping, String enableLowercase, String fileFormat, String host,
                                            String password, String port, String username, String hive, String krb5, String token,
                                            String serviceAsNamespace, String attrNativeTypes, String metadata) {
    Context context = new Context();
    context.put("backend.impl", backendImpl);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batchSize", batchSize);
    context.put("batchTime", batchTime);
    context.put("batchTTL", batchTTL);
    context.put("csv_separator", csvSeparator);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_grouping", enableLowercase);
    context.put("file_format", fileFormat);
    context.put("hdfs_host", host);
    context.put("hdfs_password", password);
    context.put("hdfs_port", port);
    context.put("hdfs_username", username);
    context.put("hive", hive);
    context.put("krb5_auth", krb5);
    context.put("oauth2_token", token);
    context.put("service_as_namespace", serviceAsNamespace);
    context.put("attr_native_types", attrNativeTypes);
    context.put("attr_metadata_store", metadata);
    return context;
}
 
Example 18
Project: ingestion    File: TimeBasedIndexNameBuilderTest.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  Context context = new Context();
  context.put(ElasticSearchSinkConstants.INDEX_NAME, "prefix");
  indexNameBuilder = new TimeBasedIndexNameBuilder();
  indexNameBuilder.configure(context);
}
 
Example 19
Project: flume-elasticsearch-sink    File: ElasticSearchSink.java    License: Apache License 2.0
@Override
public void configure(Context context) {
    String[] hosts = getHosts(context);
    if (ArrayUtils.isNotEmpty(hosts)) {
        client = new ElasticsearchClientBuilder(
                context.getString(PREFIX + ES_CLUSTER_NAME, DEFAULT_ES_CLUSTER_NAME), hosts)
                .build();
        buildIndexBuilder(context);
        buildSerializer(context);
        bulkProcessor = new BulkProcessorBulider().buildBulkProcessor(context, this);
    } else {
        logger.error("Could not create Rest client, No host exist");
    }
}
 
Example 20
Project: mt-flume    File: NetcatSource.java    License: Apache License 2.0
@Override
public void configure(Context context) {
  String hostKey = NetcatSourceConfigurationConstants.CONFIG_HOSTNAME;
  String portKey = NetcatSourceConfigurationConstants.CONFIG_PORT;
  String ackEventKey = NetcatSourceConfigurationConstants.CONFIG_ACKEVENT;

  Configurables.ensureRequiredNonNull(context, hostKey, portKey);

  hostName = context.getString(hostKey);
  port = context.getInteger(portKey);
  ackEveryEvent = context.getBoolean(ackEventKey, true);
  maxLineLength = context.getInteger(
      NetcatSourceConfigurationConstants.CONFIG_MAX_LINE_LENGTH,
      NetcatSourceConfigurationConstants.DEFAULT_MAX_LINE_LENGTH);
}
 
Example 21
Project: phoenix    File: PhoenixSink.java    License: Apache License 2.0
/**
 * Initializes the serializer for Flume events.
 * @param context the sink configuration context
 * @param eventSerializerType the name of the serializer type to instantiate
 */
private void initializeSerializer(final Context context, final String eventSerializerType) {
    EventSerializers eventSerializer = null;
    try {
        eventSerializer = EventSerializers.valueOf(eventSerializerType.toUpperCase());
    } catch (IllegalArgumentException iae) {
        logger.error("An invalid eventSerializer {} was passed. Please specify one of {}", eventSerializerType,
                Joiner.on(",").skipNulls().join(EventSerializers.values()));
        Throwables.propagate(iae);
    }

    final Context serializerContext = new Context();
    serializerContext.putAll(context.getSubProperties(FlumeConstants.CONFIG_SERIALIZER_PREFIX));
    copyPropertiesToSerializerContext(context, serializerContext);

    try {
        @SuppressWarnings("unchecked")
        Class<? extends EventSerializer> clazz = (Class<? extends EventSerializer>) Class.forName(eventSerializer.getClassName());
        serializer = clazz.newInstance();
        serializer.configure(serializerContext);
    } catch (Exception e) {
        logger.error("Could not instantiate event serializer.", e);
        Throwables.propagate(e);
    }
}
 
Example 22
Project: mt-flume    File: TestMorphlineInterceptor.java    License: Apache License 2.0
@Test
/* leading XXXXX does not match regex, thus we expect the event to be dropped */
public void testGrokIfNotMatchDropEventDrop() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM, RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");
  String msg = "<XXXXXXXXXXXXX164>Feb  4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0 port 22.";
  Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
  Event actual = build(context).intercept(input);
  assertNull(actual);
}
 
Example 23
Project: ingestion    File: MongoSinkTest.java    License: Apache License 2.0
@Test(expected = MongoSinkException.class)
public void confSingleModeWithNoDefaultDB() throws Exception {
    final MongoSink mongoSink = new MongoSink();
    final Context context = new Context();
    context.put("dynamic", "false");
    context.put("mongoUri", "mongodb://localhost:10000");
    Configurables.configure(mongoSink, context);
}
 
Example 24
Project: ingestion    File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostAndPortsWithWhitespaces() {
  parameters.put(HOSTNAMES,
      " 10.5.5.27 : 9300 , 10.5.5.28 : 9301 , 10.5.5.29 : 9302 ");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27:9300", "10.5.5.28:9301", "10.5.5.29:9302" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example 25
Project: ingestion    File: TestCassandraSink.java    License: Apache License 2.0
@Test
public void confMissingCqlFileFails() {
  final CassandraSink sink = new CassandraSink();
  final Context context = new Context();
  context.put("tables", "keyspace.table");
  context.put("cqlFile", "/NOT/FOUND/MY.CQL");
  thrown.expect(ConfigurationException.class);
  thrown.expectMessage("Cannot read CQL file: /NOT/FOUND/MY.CQL");
  thrown.expectCause(new CauseMatcher(FileNotFoundException.class));
  sink.configure(context);
}
 
Example 26
Project: ingestion    File: RedisSourceTest.java    License: Apache License 2.0
@Before
public void setUp() {
    source = new RedisSource();
    channel = new MemoryChannel();

    Configurables.configure(channel, new Context());

    List<Channel> channels = new ArrayList<Channel>();
    channels.add(channel);

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);

    source.setChannelProcessor(new ChannelProcessor(rcs));
}
 
Example 27
Project: phoenix    File: TestPhoenixSink.java    License: BSD 3-Clause "New" or "Revised" License
@Test(expected= NullPointerException.class)
public void testInvalidConfiguration () {
    
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example 28
Project: ingestion    File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostWithWhitespacesUsingDefaultPorts() {
  parameters.put(HOSTNAMES, " 10.5.5.27 , 10.5.5.28 , 10.5.5.29 ");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27", "10.5.5.28", "10.5.5.29" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example 29
Project: phoenix    File: PhoenixSinkIT.java    License: Apache License 2.0
@Test(expected=IllegalArgumentException.class)
public void testInvalidConfigurationOfSerializer () {
    
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, "csv");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example 30
Project: SparkOnALog    File: CopyOfFlumeHBaseWordCountInterceptor.java    License: Apache License 2.0
@Override
public void configure(Context context) {
	tableName = context.getString("hbase-table", "flumeCounter");
	columnFamilyName = context.getString("hbase-column-family", "C");
	flushIntervals = Integer.parseInt(context.getString(
			"hbase-flush-intervals", "3000"));
}
 
Example 31
Project: flume-ng-extends-source    File: KafkaSourceUtil.java    License: MIT License
public static Properties getKafkaProperties(Context context) {
  log.info("context={}",context.toString());
  Properties props =  generateDefaultKafkaProps();
  setKafkaProps(context,props);
  addDocumentedKafkaProps(context,props);
  return props;
}
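The helpers referenced here (generateDefaultKafkaProps, setKafkaProps, addDocumentedKafkaProps) are not shown on this page. Going by the javadoc in Example 8, which says user parameters starting with kafka. are copied into the consumer properties, a hypothetical sketch of setKafkaProps could look like the following; the actual KafkaSourceUtil implementation may differ.

private static void setKafkaProps(Context context, Properties kafkaProps) {
  // getSubProperties("kafka.") returns the user-supplied parameters with
  // the "kafka." prefix stripped, i.e. already in Kafka's own key format.
  for (Map.Entry<String, String> prop :
      context.getSubProperties("kafka.").entrySet()) {
    kafkaProps.setProperty(prop.getKey(), prop.getValue());
  }
}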
 
Example 32
Project: searchanalytics-bigdata    File: FlumeESSinkServiceImpl.java    License: MIT License
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParameters = new HashMap<>();
	channelParameters.put("capacity", "100000");
	channelParameters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParameters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> parameters = new HashMap<>();
	parameters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	parameters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	parameters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	parameters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	parameters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	parameters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(parameters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Example 33
Project: ingestion    File: XmlXpathDeserializer.java    License: Apache License 2.0
@Override
public EventDeserializer build(Context context, ResettableInputStream in) {
    if (!(in instanceof Seekable)) {
        throw new IllegalArgumentException(
                "Cannot use this deserializer without a Seekable input stream");
    }
    try {
        return new XmlXpathDeserializer(context, in);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}
 
Example 34
Project: ingestion    File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldIndexFiveEvents() throws Exception {
  // Make it so we only need to call process once
  parameters.put(BATCH_SIZE, "5");
  Configurables.configure(fixture, new Context(parameters));
  Channel channel = bindAndStartChannel(fixture);

  int numberOfEvents = 5;
  Event[] events = new Event[numberOfEvents];

  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < numberOfEvents; i++) {
    String body = "event #" + i + " of " + numberOfEvents;
    Event event = EventBuilder.withBody(body.getBytes());
    events[i] = event;
    channel.put(event);
  }
  tx.commit();
  tx.close();

  fixture.process();
  fixture.stop();
  client.admin().indices()
      .refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

  assertMatchAllQuery(numberOfEvents, events);
  assertBodyQuery(5, events);
}
 
Example 35
Project: mt-flume    File: FlumeEventAvroEventDeserializer.java    License: Apache License 2.0
@Override
public EventDeserializer build(Context context, ResettableInputStream in) {
  if (!(in instanceof RemoteMarkable)) {
    throw new IllegalArgumentException("Cannot use this deserializer " +
        "without a RemoteMarkable input stream");
  }
  FlumeEventAvroEventDeserializer deserializer
      = new FlumeEventAvroEventDeserializer(context, in);
  try {
    deserializer.initialize();
  } catch (Exception e) {
    throw new FlumeException("Cannot instantiate deserializer", e);
  }
  return deserializer;
}
 
Example 36
Project: ingestion    File: MongoSinkDynamicTest.java    License: Apache License 2.0
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Example 37
Project: mt-flume    File: TestSourceConfiguration.java    License: Apache License 2.0
/**
 * Test fails without FLUME-1847
 */
@Test(expected = ConfigurationException.class)
public void testFLUME1847() throws Exception {
  Context context = new Context();
  context.put("type", "something");
  SourceConfiguration sourceConfig = new SourceConfiguration("src");
  sourceConfig.configure(context);

}
 
Example 38
Project: ingestion    File: RestSource.java    License: Apache License 2.0
private Client initClient(Context context) {
    final Boolean skipSsl = context.getBoolean(CONF_SKIP_SSL, Boolean.FALSE);
    if (skipSsl) {
        ClientConfig config = new DefaultClientConfig();
        // SSL configuration
        config.getProperties().put(HTTPSProperties.PROPERTY_HTTPS_PROPERTIES,
                new com.sun.jersey.client.urlconnection.HTTPSProperties(getHostnameVerifier(), getSSLContext()));
        return Client.create(config);
    } else {
        return new Client();
    }
}
 
Example 39
Project: ingestion    File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostAndPorts() {
  parameters.put(HOSTNAMES, "10.5.5.27:9300,10.5.5.28:9301,10.5.5.29:9302");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27:9300", "10.5.5.28:9301", "10.5.5.29:9302" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example 40
Project: mt-flume    File: TestLineDeserializer.java    License: Apache License 2.0
@Test
public void testSimpleViaFactory() throws IOException {
  ResettableInputStream in = new ResettableTestStringInputStream(mini);
  EventDeserializer des;
  des = EventDeserializerFactory.getInstance("LINE", new Context(), in);
  validateMiniParse(des);
}