org.apache.flume.Context Java Examples

The following examples show how to use org.apache.flume.Context. They are taken from open source projects; the source project, author, file, and license are noted above each example where available.
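Before the examples, a minimal, self-contained sketch of the Context API itself may help: values are stored as plain strings, the typed getters accept defaults, and getSubProperties extracts every key under a prefix with the prefix stripped. The key names below are made up purely for illustration.

import java.util.Map;

import org.apache.flume.Context;

public class ContextBasics {
    public static void main(String[] args) {
        Context context = new Context();
        // All values are stored as strings ...
        context.put("port", "4141");
        context.put("serializer.appendNewline", "false");

        // ... and read back through typed getters, optionally with defaults.
        Integer port = context.getInteger("port");               // 4141
        Boolean ack = context.getBoolean("ackEveryEvent", true); // default applies
        String host = context.getString("bind", "0.0.0.0");      // default applies

        // getSubProperties returns every key under a prefix, with the prefix stripped,
        // e.g. "serializer.appendNewline" -> "appendNewline".
        Map<String, String> serializerProps = context.getSubProperties("serializer.");
        System.out.println(port + " " + ack + " " + host + " " + serializerProps);
    }
}
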
Example #1
@Test
public void testNoNewline() throws FileNotFoundException, IOException {

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("header1", "value1");

    OutputStream out = new FileOutputStream(testFile);
    Context context = new Context();
    context.put("appendNewline", "false");
    CustomLastfmHeaderAndBodyTextEventSerializer.Builder builder = CustomLastfmHeaderAndBodyTextEventSerializer.builder();
    // Pass the configured context so that appendNewline=false actually takes effect
    EventSerializer serializer = builder.build(context, out);
    serializer.afterCreate();
    serializer.write(EventBuilder.withBody("event 1", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 2", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 3", Charsets.UTF_8, headers));
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();

    BufferedReader reader = new BufferedReader(new FileReader(testFile));
    Assert.assertNull(reader.readLine());
    reader.close();

    FileUtils.forceDelete(testFile);
}
 
Example #2
Source Project: phoenix   Author: cloudera-labs   File: PhoenixSinkIT.java    License: Apache License 2.0
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    }catch(Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example #3
Source Project: flume-elasticsearch-sink   Author: cognitree   File: TestAvroSerializer.java    License: Apache License 2.0
/**
 * tests Avro Serializer
 */
@Test
public void testSerializer() throws Exception {
    Context context = new Context();
    String schemaFile = getClass().getResource("/schema.avsc").getFile();
    context.put(ES_AVRO_SCHEMA_FILE, schemaFile);
    avroSerializer.configure(context);
    Schema schema = new Schema.Parser().parse(new File(schemaFile));
    GenericRecord user = generateGenericRecord(schema);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    Encoder encoder = new EncoderFactory().binaryEncoder(outputStream, null);
    datumWriter.write(user, encoder);
    encoder.flush();
    Event event = EventBuilder.withBody(outputStream.toByteArray());
    XContentBuilder expected = generateContentBuilder();
    XContentBuilder actual = avroSerializer.serialize(event);
    JsonParser parser = new JsonParser();
    assertEquals(parser.parse(Strings.toString(expected)), parser.parse(Strings.toString(actual)));
}
 
Example #4
Source Project: datacollector   Author: streamsets   File: TestFlumeThriftTarget.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  port = NetworkUtils.getRandomPort();
  source = new ThriftSource();
  ch = new MemoryChannel();
  Configurables.configure(ch, new Context());

  Context context = new Context();
  context.put("port", String.valueOf(port));
  context.put("bind", "localhost");
  Configurables.configure(source, context);

  List<Channel> channels = new ArrayList<>();
  channels.add(ch);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
  source.start();
}
 
Example #5
Source Project: mt-flume   Author: javachen   File: TestNetcatSource.java    License: Apache License 2.0
@Before
public void setUp() {
  logger.info("Running setup");

  channel = new MemoryChannel();
  source = new NetcatSource();

  Context context = new Context();

  Configurables.configure(channel, context);
  List<Channel> channels = Lists.newArrayList(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));
}
 
Example #6
Source Project: mt-flume   Author: javachen   File: TestHostInterceptor.java    License: Apache License 2.0
/**
 * Ensure host is NOT overwritten when preserveExisting=true.
 */
@Test
public void testPreserve() throws Exception {
  Context ctx = new Context();
  ctx.put("preserveExisting", "true");

  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
          InterceptorType.HOST.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  final String ORIGINAL_HOST = "originalhost";
  Event eventBeforeIntercept = EventBuilder.withBody("test event",
          Charsets.UTF_8);
  eventBeforeIntercept.getHeaders().put(Constants.HOST, ORIGINAL_HOST);
  Assert.assertEquals(ORIGINAL_HOST,
          eventBeforeIntercept.getHeaders().get(Constants.HOST));

  String expectedHost = ORIGINAL_HOST;
  Event eventAfterIntercept = interceptor.intercept(eventBeforeIntercept);
  String actualHost = eventAfterIntercept.getHeaders().get(Constants.HOST);

  Assert.assertNotNull(actualHost);
  Assert.assertEquals(expectedHost, actualHost);
}
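For contrast with the test above, here is a minimal sketch of the assumed default behaviour: when preserveExisting is left unset (its documented default is false), the host interceptor is expected to overwrite an existing host header with the local hostname. This is an illustrative sketch, not part of the original test class.

import com.google.common.base.Charsets;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.interceptor.Interceptor;
import org.apache.flume.interceptor.InterceptorBuilderFactory;
import org.apache.flume.interceptor.InterceptorType;

public class HostInterceptorDefaultSketch {
    public static void main(String[] args) throws Exception {
        // No preserveExisting key: the default (assumed false) lets the interceptor overwrite.
        Context ctx = new Context();

        Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
                InterceptorType.HOST.toString());
        builder.configure(ctx);
        Interceptor interceptor = builder.build();

        Event event = EventBuilder.withBody("test event", Charsets.UTF_8);
        event.getHeaders().put("host", "originalhost");

        Event intercepted = interceptor.intercept(event);
        // Expected to print the local host address/name, not "originalhost".
        System.out.println(intercepted.getHeaders().get("host"));
    }
}
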
 
Example #7
Source Project: fiware-cygnus   Author: telefonicaid   File: NGSICartoDBSinkTest.java    License: GNU Affero General Public License v3.0
/**
 * [NGSICartoDBSink.configure] -------- Only enable_distance configuration works. This must be removed
 * once enable_raw parameter is removed (currently it is just deprecated).
 * @throws java.lang.Exception
 */
@Test
public void testConfigureEnableDistanceOnly() throws Exception {
    System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
            + "-------- Only enable_distance configuration works");
    // Create a NGSICartoDBSink
    Context context = new Context();
    context.put("enable_distance", "true");
    context.put("keys_conf_file", ""); // any value except for null
    NGSICartoDBSink sink = new NGSICartoDBSink();
    sink.configure(context);
    
    try {
        assertTrue(sink.getEnableDistanceHistoric());
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "-  OK  - Only 'enable_distance' was configured and worked");
    } catch (AssertionError e) {
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "- FAIL - Only 'enable_distance' was configured and did not work");
        throw e;
    } // try catch
}
 
Example #8
Source Project: flume-ng-extends-source   Author: ningg   File: KafkaSource.java    License: MIT License
/**
 * Configures the source and generates properties for the Kafka Consumer.
 *
 * Kafka Consumer properties are generated as follows:
 *
 * 1. Generate a properties object with some static defaults that can be
 *    overridden by Source configuration.
 * 2. Add the configuration users provided for Kafka (parameters starting
 *    with "kafka." that must be valid Kafka Consumer properties).
 * 3. Add the source-documented parameters, which can override other
 *    properties.
 *
 * @param context
 */
public void configure(Context context) {
  this.context = context;
  batchUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_SIZE,
          KafkaSourceConstants.DEFAULT_BATCH_SIZE);
  timeUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_DURATION_MS,
          KafkaSourceConstants.DEFAULT_BATCH_DURATION);
  topic = context.getString(KafkaSourceConstants.TOPIC);

  if(topic == null) {
    throw new ConfigurationException("Kafka topic must be specified.");
  }

  kafkaProps = KafkaSourceUtil.getKafkaProperties(context);
  consumerTimeout = Integer.parseInt(kafkaProps.getProperty(
          KafkaSourceConstants.CONSUMER_TIMEOUT));
  kafkaAutoCommitEnabled = Boolean.parseBoolean(kafkaProps.getProperty(
          KafkaSourceConstants.AUTO_COMMIT_ENABLED));

  if (counter == null) {
    counter = new KafkaSourceCounter(getName());
  }
}
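The kafka.-prefixed keys mentioned in the comment above are extracted from the Context and handed to the Kafka consumer. The following is only a sketch of how such an extraction can be written with Context.getSubProperties; the helper name and the default shown are assumptions, not the project's actual KafkaSourceUtil implementation.

import java.util.Map;
import java.util.Properties;

import org.apache.flume.Context;

public class KafkaPropertiesSketch {

    // Illustrative default; the real source may use different values.
    private static final String DEFAULT_GROUP_ID = "flume";

    public static Properties toConsumerProperties(Context context) {
        Properties props = new Properties();
        // 1. Static defaults that the source configuration may override.
        props.setProperty("group.id", DEFAULT_GROUP_ID);
        // 2. Every key starting with "kafka." is passed through with the prefix
        //    stripped, e.g. "kafka.zookeeper.connect" -> "zookeeper.connect".
        Map<String, String> kafkaKeys = context.getSubProperties("kafka.");
        for (Map.Entry<String, String> entry : kafkaKeys.entrySet()) {
            props.setProperty(entry.getKey(), entry.getValue());
        }
        return props;
    }
}
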
 
Example #9
Source Project: phoenix   Author: forcedotcom   File: TestPhoenixSink.java    License: BSD 3-Clause "New" or "Revised" License
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    }catch(Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example #10
Source Project: fiware-cygnus   Author: telefonicaid   File: NGSIPostgreSQLSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String attrPersistence, String batchSize, String batchTime, String batchTTL,
                                            String dataModel, String enableEncoding, String enableGrouping, String enableLowercase, String host,
                                            String password, String port, String username, String cache, String attrNativeTypes) {
    Context context = new Context();
    context.put("attr_persistence", attrPersistence);
    context.put("batch_size", batchSize);
    context.put("batch_time", batchTime);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("postgresql_host", host);
    context.put("postgresql_password", password);
    context.put("postgresql_port", port);
    context.put("postgresql_username", username);
    context.put("backend.enable_cache", cache);
    context.put("attr_native_types", attrNativeTypes);
    return context;
}
 
Example #11
Source Project: fiware-cygnus   Author: telefonicaid   File: NGSICartoDBSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContext(String apiKey, String backendMaxConns, String backendMaxConnsPerRoute,
        String batchSize, String batchTimeout, String batchTTL, String dataModel, String enableDistanceHistoric,
        String enableGrouping, String enableLowercase, String enableRawHistoric, String enableRawSnapshot,
        String swapCoordinates, String keysConfFile) {
    Context context = new Context();
    context.put("api_key", apiKey);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batch_size", batchSize);
    context.put("batch_timeout", batchTimeout);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_distance_historic", enableDistanceHistoric);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("enable_raw_historic", enableRawHistoric);
    context.put("enable_raw_snapshot", enableRawSnapshot);
    context.put("swap_coordinates", swapCoordinates);
    context.put("keys_conf_file", keysConfFile);
    return context;
}
 
Example #12
Source Project: flume-plugins   Author: DandyDev   File: JavaLogAvroEventSerializer.java    License: MIT License
@Override
public EventSerializer build(Context context, OutputStream out) {
    JavaLogAvroEventSerializer writer = null;
    try {
        writer = new JavaLogAvroEventSerializer(out);
        writer.configure(context);
    } catch (IOException e) {
        log.error("Unable to parse schema file. Exception follows.", e);
    }
    return writer;
}
 
Example #13
Source Project: mt-flume   Author: javachen   File: HDFSCompressedDataStream.java    License: Apache License 2.0
@Override
public void configure(Context context) {
  super.configure(context);

  serializerType = context.getString("serializer", "TEXT");
  useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
      false);
  serializerContext = new Context(
      context.getSubProperties(EventSerializer.CTX_PREFIX));
  logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = "
      + useRawLocalFileSystem);
}
 
Example #14
Source Project: mt-flume   Author: javachen   File: TestUUIDInterceptor.java    License: Apache License 2.0
@Test
public void testPrefix() throws Exception {
  Context context = new Context();
  context.put(UUIDInterceptor.HEADER_NAME, ID);
  context.put(UUIDInterceptor.PREFIX_NAME, "bar#");
  Event event = new SimpleEvent();
  assertTrue(build(context).intercept(event).getHeaders().get(ID).startsWith("bar#"));
}
 
Example #15
Source Project: mt-flume   Author: javachen   File: TestElasticSearchSink.java    License: Apache License 2.0
@Test
public void shouldParseMultipleHostUsingDefaultPorts() {
  parameters.put(HOSTNAMES, "10.5.5.27,10.5.5.28,10.5.5.29");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  InetSocketTransportAddress[] expected = {
      new InetSocketTransportAddress("10.5.5.27", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.28", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.29", DEFAULT_PORT) };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example #16
Source Project: mt-flume   Author: javachen   File: TestSpoolDirectorySource.java    License: Apache License 2.0
@Test
public void testPutFilenameHeader() throws IOException, InterruptedException {
  Context context = new Context();
  File f1 = new File(tmpDir.getAbsolutePath() + "/file1");

  Files.write("file1line1\nfile1line2\nfile1line3\nfile1line4\n" +
              "file1line5\nfile1line6\nfile1line7\nfile1line8\n",
              f1, Charsets.UTF_8);

  context.put(SpoolDirectorySourceConfigurationConstants.SPOOL_DIRECTORY,
      tmpDir.getAbsolutePath());
  context.put(SpoolDirectorySourceConfigurationConstants.FILENAME_HEADER,
      "true");
  context.put(SpoolDirectorySourceConfigurationConstants.FILENAME_HEADER_KEY,
      "fileHeaderKeyTest");

  Configurables.configure(source, context);
  source.start();
  Thread.sleep(500);
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event e = channel.take();
  Assert.assertNotNull("Event must not be null", e);
  Assert.assertNotNull("Event headers must not be null", e.getHeaders());
  Assert.assertNotNull(e.getHeaders().get("fileHeaderKeyTest"));
  Assert.assertEquals(f1.getAbsolutePath(),
      e.getHeaders().get("fileHeaderKeyTest"));
  txn.commit();
  txn.close();
}
 
Example #17
Source Project: fiware-cygnus   Author: telefonicaid   File: NGSICKANSinkTest.java    License: GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String backendImpl, String backendMaxConns, String backendMaxConnsPerRoute,
                                            String batchSize, String batchTime, String batchTTL, String csvSeparator, String dataModel,
                                            String enableEncoding, String enableGrouping, String enableLowercase, String fileFormat, String host,
                                            String password, String port, String username, String hive, String krb5, String token,
                                            String serviceAsNamespace, String attrNativeTypes, String metadata) {
    Context context = new Context();
    context.put("backend.impl", backendImpl);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batchSize", batchSize);
    context.put("batchTime", batchTime);
    context.put("batchTTL", batchTTL);
    context.put("csv_separator", csvSeparator);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_grouping", enableLowercase);
    context.put("file_format", fileFormat);
    context.put("hdfs_host", host);
    context.put("hdfs_password", password);
    context.put("hdfs_port", port);
    context.put("hdfs_username", username);
    context.put("hive", hive);
    context.put("krb5_auth", krb5);
    context.put("oauth2_token", token);
    context.put("service_as_namespace", serviceAsNamespace);
    context.put("attr_native_types", attrNativeTypes);
    context.put("attr_metadata_store", metadata);
    return context;
}
 
Example #18
Source Project: ingestion   Author: Stratio   File: TimeBasedIndexNameBuilderTest.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  Context context = new Context();
  context.put(ElasticSearchSinkConstants.INDEX_NAME, "prefix");
  indexNameBuilder = new TimeBasedIndexNameBuilder();
  indexNameBuilder.configure(context);
}
 
Example #19
Source Project: flume-elasticsearch-sink   Author: cognitree   File: ElasticSearchSink.java    License: Apache License 2.0
@Override
public void configure(Context context) {
    String[] hosts = getHosts(context);
    if (ArrayUtils.isNotEmpty(hosts)) {
        client = new ElasticsearchClientBuilder(
                context.getString(PREFIX + ES_CLUSTER_NAME, DEFAULT_ES_CLUSTER_NAME), hosts)
                .build();
        buildIndexBuilder(context);
        buildSerializer(context);
        bulkProcessor = new BulkProcessorBulider().buildBulkProcessor(context, this);
    } else {
        logger.error("Could not create Rest client, No host exist");
    }
}
 
Example #20
Source Project: mt-flume   Author: javachen   File: NetcatSource.java    License: Apache License 2.0
@Override
public void configure(Context context) {
  String hostKey = NetcatSourceConfigurationConstants.CONFIG_HOSTNAME;
  String portKey = NetcatSourceConfigurationConstants.CONFIG_PORT;
  String ackEventKey = NetcatSourceConfigurationConstants.CONFIG_ACKEVENT;

  Configurables.ensureRequiredNonNull(context, hostKey, portKey);

  hostName = context.getString(hostKey);
  port = context.getInteger(portKey);
  ackEveryEvent = context.getBoolean(ackEventKey, true);
  maxLineLength = context.getInteger(
      NetcatSourceConfigurationConstants.CONFIG_MAX_LINE_LENGTH,
      NetcatSourceConfigurationConstants.DEFAULT_MAX_LINE_LENGTH);
}
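Configurables.ensureRequiredNonNull is what turns a missing mandatory key into a hard failure. Below is a small illustrative sketch (key names chosen arbitrarily); the call is expected to throw an IllegalArgumentException when a required key is absent.

import org.apache.flume.Context;
import org.apache.flume.conf.Configurables;

public class RequiredKeysSketch {
    public static void main(String[] args) {
        Context context = new Context();
        context.put("bind", "0.0.0.0");
        // "port" is intentionally left unset.
        try {
            Configurables.ensureRequiredNonNull(context, "bind", "port");
        } catch (IllegalArgumentException expected) {
            // Thrown because the required "port" parameter is missing.
            System.out.println("Missing required parameter: " + expected.getMessage());
        }
    }
}
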
 
Example #21
Source Project: phoenix   Author: cloudera-labs   File: PhoenixSink.java    License: Apache License 2.0
/**
 * Initializes the serializer for flume events.
 * @param eventSerializerType
 */
private void initializeSerializer(final Context context, final String eventSerializerType) {

    EventSerializers eventSerializer = null;
    try {
        eventSerializer = EventSerializers.valueOf(eventSerializerType.toUpperCase());
    } catch (IllegalArgumentException iae) {
        logger.error("An invalid eventSerializer {} was passed. Please specify one of {} ", eventSerializerType,
                Joiner.on(",").skipNulls().join(EventSerializers.values()));
        Throwables.propagate(iae);
    }

    final Context serializerContext = new Context();
    serializerContext.putAll(context.getSubProperties(FlumeConstants.CONFIG_SERIALIZER_PREFIX));
    copyPropertiesToSerializerContext(context, serializerContext);

    try {
        @SuppressWarnings("unchecked")
        Class<? extends EventSerializer> clazz = (Class<? extends EventSerializer>) Class.forName(eventSerializer.getClassName());
        serializer = clazz.newInstance();
        serializer.configure(serializerContext);
    } catch (Exception e) {
        logger.error("Could not instantiate event serializer.", e);
        Throwables.propagate(e);
    }
}
 
Example #22
Source Project: mt-flume   Author: javachen   File: TestMorphlineInterceptor.java    License: Apache License 2.0
@Test
/* leading XXXXX does not match regex, thus we expect the event to be dropped */
public void testGrokIfNotMatchDropEventDrop() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM, RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");
  String msg = "<XXXXXXXXXXXXX164>Feb  4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0 port 22.";
  Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
  Event actual = build(context).intercept(input);
  assertNull(actual);
}
 
Example #23
Source Project: ingestion   Author: Stratio   File: MongoSinkTest.java    License: Apache License 2.0
@Test(expected = MongoSinkException.class)
public void confSingleModeWithNoDefaultDB() throws Exception {
    final MongoSink mongoSink = new MongoSink();
    final Context context = new Context();
    context.put("dynamic", "false");
    context.put("mongoUri", "mongodb://localhost:10000");
    Configurables.configure(mongoSink, context);
}
 
Example #24
Source Project: ingestion   Author: Stratio   File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostAndPortsWithWhitespaces() {
  parameters.put(HOSTNAMES,
      " 10.5.5.27 : 9300 , 10.5.5.28 : 9301 , 10.5.5.29 : 9302 ");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27:9300", "10.5.5.28:9301", "10.5.5.29:9302" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example #25
Source Project: ingestion   Author: Stratio   File: TestCassandraSink.java    License: Apache License 2.0
@Test
public void confMissingCqlFileFails() {
  final CassandraSink sink = new CassandraSink();
  final Context context = new Context();
  context.put("tables", "keyspace.table");
  context.put("cqlFile", "/NOT/FOUND/MY.CQL");
  thrown.expect(ConfigurationException.class);
  thrown.expectMessage("Cannot read CQL file: /NOT/FOUND/MY.CQL");
  thrown.expectCause(new CauseMatcher(FileNotFoundException.class));
  sink.configure(context);
}
 
Example #26
Source Project: ingestion   Author: Stratio   File: RedisSourceTest.java    License: Apache License 2.0
@Before
public void setUp() {
    source = new RedisSource();
    channel = new MemoryChannel();

    Configurables.configure(channel, new Context());

    List<Channel> channels = new ArrayList<Channel>();
    channels.add(channel);

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);

    source.setChannelProcessor(new ChannelProcessor(rcs));
}
 
Example #27
Source Project: phoenix   Author: forcedotcom   File: TestPhoenixSink.java    License: BSD 3-Clause "New" or "Revised" License
@Test(expected= NullPointerException.class)
public void testInvalidConfiguration () {
    
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example #28
Source Project: ingestion   Author: Stratio   File: TestElasticSearchSink.java    License: Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostWithWhitespacesUsingDefaultPorts() {
  parameters.put(HOSTNAMES, " 10.5.5.27 , 10.5.5.28 , 10.5.5.29 ");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27", "10.5.5.28", "10.5.5.29" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example #29
Source Project: phoenix   Author: cloudera-labs   File: PhoenixSinkIT.java    License: Apache License 2.0
@Test(expected=IllegalArgumentException.class)
public void testInvalidConfigurationOfSerializer () {
    
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,"csv");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example #30
Source Project: SparkOnALog   Author: tmalaska   File: CopyOfFlumeHBaseWordCountInterceptor.java    License: Apache License 2.0
@Override
public void configure(Context context) {
	tableName = context.getString("hbase-table", "flumeCounter");
	columnFamilyName = context.getString("hbase-column-family", "C");
	flushIntervals = Integer.parseInt(context.getString(
			"hbase-flush-intervals", "3000"));
}