org.apache.flume.Context Java Examples

The following examples show how to use org.apache.flume.Context. They are drawn from a number of open-source projects; the source file, project, and license are noted above each example.
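Before the project-specific examples, here is a minimal, self-contained sketch of the two ways a Context is typically built and consumed in the snippets below: direct put() calls read back through typed getters, and the Map-based constructor handed to Configurables.configure(). The property keys and values used here are illustrative only and are not taken from any of the projects.

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

import java.util.HashMap;
import java.util.Map;

public class ContextUsageSketch {

    public static void main(String[] args) {
        // 1. Empty Context populated with put(); values are stored as Strings
        //    and the typed getters parse them, falling back to the given default.
        Context context = new Context();
        context.put("bind", "localhost");
        context.put("port", "4141");
        String bind = context.getString("bind");            // "localhost"
        Integer port = context.getInteger("port", 4141);    // parsed from the String value
        Boolean ssl = context.getBoolean("ssl", false);     // default used when the key is absent
        System.out.println(bind + ":" + port + " ssl=" + ssl);

        // 2. Map-based constructor, typically passed to Configurables.configure()
        //    so the component's configure(Context) callback receives the settings.
        Map<String, String> parameters = new HashMap<>();
        parameters.put("capacity", "10000");
        parameters.put("transactionCapacity", "100");
        Channel channel = new MemoryChannel();
        Configurables.configure(channel, new Context(parameters));

        // getSubProperties() strips a prefix and returns only the matching keys,
        // e.g. "serializer.appendNewline" comes back as "appendNewline" (see Example #18).
        context.put("serializer.appendNewline", "false");
        System.out.println(context.getSubProperties("serializer."));
    }
}
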
Example #1
Source File: PhoenixSinkIT.java    From phoenix with Apache License 2.0
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    } catch (Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example #2
Source File: CustomLastfmHeaderAndBodyTextEventSerializerTest.java    From sequenceiq-samples with Apache License 2.0
@Test
public void testNoNewline() throws FileNotFoundException, IOException {

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("header1", "value1");

    OutputStream out = new FileOutputStream(testFile);
    Context context = new Context();
    context.put("appendNewline", "false");
    CustomLastfmHeaderAndBodyTextEventSerializer.Builder builder = CustomLastfmHeaderAndBodyTextEventSerializer.builder();
    EventSerializer serializer = builder.build(new Context(), out);
    serializer.afterCreate();
    serializer.write(EventBuilder.withBody("event 1", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 2", Charsets.UTF_8, headers));
    serializer.write(EventBuilder.withBody("event 3", Charsets.UTF_8, headers));
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();

    BufferedReader reader = new BufferedReader(new FileReader(testFile));
    Assert.assertNull(reader.readLine());
    reader.close();

    FileUtils.forceDelete(testFile);
}
 
Example #3
Source File: NGSIPostgreSQLSinkTest.java    From fiware-cygnus with GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String attrPersistence, String batchSize, String batchTime, String batchTTL,
                                            String dataModel, String enableEncoding, String enableGrouping, String enableLowercase, String host,
                                            String password, String port, String username, String cache, String attrNativeTypes) {
    Context context = new Context();
    context.put("attr_persistence", attrPersistence);
    context.put("batch_size", batchSize);
    context.put("batch_time", batchTime);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("postgresql_host", host);
    context.put("postgresql_password", password);
    context.put("postgresql_port", port);
    context.put("postgresql_username", username);
    context.put("backend.enable_cache", cache);
    context.put("attr_native_types", attrNativeTypes);
    return context;
}
 
Example #4
Source File: NGSICartoDBSinkTest.java    From fiware-cygnus with GNU Affero General Public License v3.0
private Context createContext(String apiKey, String backendMaxConns, String backendMaxConnsPerRoute,
        String batchSize, String batchTimeout, String batchTTL, String dataModel, String enableDistanceHistoric,
        String enableGrouping, String enableLowercase, String enableRawHistoric, String enableRawSnapshot,
        String swapCoordinates, String keysConfFile) {
    Context context = new Context();
    context.put("api_key", apiKey);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batch_size", batchSize);
    context.put("batch_timeout", batchTimeout);
    context.put("batch_ttl", batchTTL);
    context.put("data_model", dataModel);
    context.put("enable_distance_historic", enableDistanceHistoric);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_lowercase", enableLowercase);
    context.put("enable_raw_historic", enableRawHistoric);
    context.put("enable_raw_snapshot", enableRawSnapshot);
    context.put("swap_coordinates", swapCoordinates);
    context.put("keys_conf_file", keysConfFile);
    return context;
}
 
Example #5
Source File: TestAvroSerializer.java    From flume-elasticsearch-sink with Apache License 2.0
/**
 * tests Avro Serializer
 */
@Test
public void testSerializer() throws Exception {
    Context context = new Context();
    String schemaFile = getClass().getResource("/schema.avsc").getFile();
    context.put(ES_AVRO_SCHEMA_FILE, schemaFile);
    avroSerializer.configure(context);
    Schema schema = new Schema.Parser().parse(new File(schemaFile));
    GenericRecord user = generateGenericRecord(schema);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    Encoder encoder = new EncoderFactory().binaryEncoder(outputStream, null);
    datumWriter.write(user, encoder);
    encoder.flush();
    Event event = EventBuilder.withBody(outputStream.toByteArray());
    XContentBuilder expected = generateContentBuilder();
    XContentBuilder actual = avroSerializer.serialize(event);
    JsonParser parser = new JsonParser();
    assertEquals(parser.parse(Strings.toString(expected)), parser.parse(Strings.toString(actual)));
}
 
Example #6
Source File: TestFlumeThriftTarget.java    From datacollector with Apache License 2.0
@Before
public void setUp() throws Exception {
  port = NetworkUtils.getRandomPort();
  source = new ThriftSource();
  ch = new MemoryChannel();
  Configurables.configure(ch, new Context());

  Context context = new Context();
  context.put("port", String.valueOf(port));
  context.put("bind", "localhost");
  Configurables.configure(source, context);

  List<Channel> channels = new ArrayList<>();
  channels.add(ch);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
  source.start();
}
 
Example #7
Source File: TestNetcatSource.java    From mt-flume with Apache License 2.0
@Before
public void setUp() {
  logger.info("Running setup");

  channel = new MemoryChannel();
  source = new NetcatSource();

  Context context = new Context();

  Configurables.configure(channel, context);
  List<Channel> channels = Lists.newArrayList(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));
}
 
Example #8
Source File: TestHostInterceptor.java    From mt-flume with Apache License 2.0
/**
 * Ensure host is NOT overwritten when preserveExisting=true.
 */
@Test
public void testPreserve() throws Exception {
  Context ctx = new Context();
  ctx.put("preserveExisting", "true");

  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
          InterceptorType.HOST.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  final String ORIGINAL_HOST = "originalhost";
  Event eventBeforeIntercept = EventBuilder.withBody("test event",
          Charsets.UTF_8);
  eventBeforeIntercept.getHeaders().put(Constants.HOST, ORIGINAL_HOST);
  Assert.assertEquals(ORIGINAL_HOST,
          eventBeforeIntercept.getHeaders().get(Constants.HOST));

  String expectedHost = ORIGINAL_HOST;
  Event eventAfterIntercept = interceptor.intercept(eventBeforeIntercept);
  String actualHost = eventAfterIntercept.getHeaders().get(Constants.HOST);

  Assert.assertNotNull(actualHost);
  Assert.assertEquals(expectedHost, actualHost);
}
 
Example #9
Source File: NGSICartoDBSinkTest.java    From fiware-cygnus with GNU Affero General Public License v3.0
/**
 * [NGSICartoDBSink.configure] -------- Only enable_distance configuration works. This must be removed
 * once enable_raw parameter is removed (currently it is just deprecated).
 * @throws java.lang.Exception
 */
@Test
public void testConfigureEnableDistanceOnly() throws Exception {
    System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
            + "-------- Only enable_distance configuration works");
    // Create a NGSICartoDBSink
    Context context = new Context();
    context.put("enable_distance", "true");
    context.put("keys_conf_file", ""); // any value except for null
    NGSICartoDBSink sink = new NGSICartoDBSink();
    sink.configure(context);
    
    try {
        assertTrue(sink.getEnableDistanceHistoric());
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "-  OK  - Only 'enable_distance' was configured and worked");
    } catch (AssertionError e) {
        System.out.println(getTestTraceHead("[NGSICartoDBSink.configure]")
                + "- FAIL - Only 'enable_distance' was configured and did not work");
        throw e;
    } // try catch
}
 
Example #10
Source File: KafkaSource.java    From flume-ng-extends-source with MIT License
/**
 * We configure the source and generate properties for the Kafka Consumer.
 *
 * Kafka Consumer properties are generated as follows:
 *
 * 1. Generate a properties object with some static defaults that can be
 *    overridden by Source configuration.
 * 2. Add the configuration users added for Kafka (parameters starting with
 *    "kafka." that must be valid Kafka Consumer properties).
 * 3. Add the source's documented parameters, which can override other
 *    properties.
 *
 * @param context
 */
public void configure(Context context) {
  this.context = context;
  batchUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_SIZE,
          KafkaSourceConstants.DEFAULT_BATCH_SIZE);
  timeUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_DURATION_MS,
          KafkaSourceConstants.DEFAULT_BATCH_DURATION);
  topic = context.getString(KafkaSourceConstants.TOPIC);

  if(topic == null) {
    throw new ConfigurationException("Kafka topic must be specified.");
  }

  kafkaProps = KafkaSourceUtil.getKafkaProperties(context);
  consumerTimeout = Integer.parseInt(kafkaProps.getProperty(
          KafkaSourceConstants.CONSUMER_TIMEOUT));
  kafkaAutoCommitEnabled = Boolean.parseBoolean(kafkaProps.getProperty(
          KafkaSourceConstants.AUTO_COMMIT_ENABLED));

  if (counter == null) {
    counter = new KafkaSourceCounter(getName());
  }
}
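
The three numbered steps in the Javadoc above are implemented by helper methods that are not part of this excerpt (generateDefaultKafkaProps, setKafkaProps and addDocumentedKafkaProps; Example #20 shows the entry point that combines them). As a rough illustration of step 2 only, the sketch below uses Context.getSubProperties() to copy "kafka."-prefixed settings into a Properties object; the default keys and values shown are assumptions for the sketch, not the project's actual defaults.

import java.util.Map;
import java.util.Properties;

import org.apache.flume.Context;

public class KafkaPropsSketch {

    /** Builds consumer Properties from every "kafka."-prefixed setting in the Context. */
    public static Properties kafkaPropsFrom(Context context) {
        Properties props = new Properties();
        // Static defaults that the source configuration may override (step 1).
        props.put("auto.commit.enable", "false");
        props.put("consumer.timeout.ms", "10");
        // getSubProperties() strips the prefix, so "kafka.group.id" arrives as "group.id" (step 2).
        Map<String, String> kafkaSettings = context.getSubProperties("kafka.");
        for (Map.Entry<String, String> entry : kafkaSettings.entrySet()) {
            props.put(entry.getKey(), entry.getValue());
        }
        return props;
    }

    public static void main(String[] args) {
        Context context = new Context();
        context.put("kafka.group.id", "flume");
        context.put("kafka.zookeeper.connect", "localhost:2181");
        System.out.println(kafkaPropsFrom(context));
    }
}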
 
Example #11
Source File: TestPhoenixSink.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Test
public void testInvalidTable() {
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    try {
        sink.start();
        fail();
    } catch (Exception e) {
        assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
    }
}
 
Example #12
Source File: XmlXpathDeserializer.java    From ingestion with Apache License 2.0
@Override
public EventDeserializer build(Context context, ResettableInputStream in) {
    if (!(in instanceof Seekable)) {
        throw new IllegalArgumentException(
                "Cannot use this deserializer without a Seekable input stream");
    }
    try {
        return new XmlXpathDeserializer(context, in);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}
 
Example #13
Source File: CopyOfFlumeHBaseWordCountInterceptor.java    From SparkOnALog with Apache License 2.0
@Override
public void configure(Context context) {
	tableName = context.getString("hbase-table", "flumeCounter");
	columnFamilyName = context.getString("hbase-column-family", "C");
	flushIntervals = Integer.parseInt(context.getString(
			"hbase-flush-intervals", "3000"));
}
 
Example #14
Source File: TestElasticSearchSink.java    From ingestion with Apache License 2.0
@Ignore @Test
public void shouldIndexFiveEvents() throws Exception {
  // Make it so we only need to call process once
  parameters.put(BATCH_SIZE, "5");
  Configurables.configure(fixture, new Context(parameters));
  Channel channel = bindAndStartChannel(fixture);

  int numberOfEvents = 5;
  Event[] events = new Event[numberOfEvents];

  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < numberOfEvents; i++) {
    String body = "event #" + i + " of " + numberOfEvents;
    Event event = EventBuilder.withBody(body.getBytes());
    events[i] = event;
    channel.put(event);
  }
  tx.commit();
  tx.close();

  fixture.process();
  fixture.stop();
  client.admin().indices()
      .refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

  assertMatchAllQuery(numberOfEvents, events);
  assertBodyQuery(5, events);
}
 
Example #15
Source File: TestElasticSearchSink.java    From mt-flume with Apache License 2.0
@Test
public void shouldParseMultipleHostUsingDefaultPorts() {
  parameters.put(HOSTNAMES, "10.5.5.27,10.5.5.28,10.5.5.29");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  InetSocketTransportAddress[] expected = {
      new InetSocketTransportAddress("10.5.5.27", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.28", DEFAULT_PORT),
      new InetSocketTransportAddress("10.5.5.29", DEFAULT_PORT) };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example #16
Source File: TestUUIDInterceptor.java    From mt-flume with Apache License 2.0
@Test
public void testPrefix() throws Exception {
  Context context = new Context();
  context.put(UUIDInterceptor.HEADER_NAME, ID);
  context.put(UUIDInterceptor.PREFIX_NAME, "bar#");
  Event event = new SimpleEvent();
  assertTrue(build(context).intercept(event).getHeaders().get(ID).startsWith("bar#"));
}
 
Example #17
Source File: FlumeESSinkServiceImpl.java    From searchanalytics-bigdata with MIT License
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	paramters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	paramters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	paramters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	paramters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	paramters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Example #18
Source File: HDFSCompressedDataStream.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  super.configure(context);

  serializerType = context.getString("serializer", "TEXT");
  useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
      false);
  serializerContext = new Context(
      context.getSubProperties(EventSerializer.CTX_PREFIX));
  logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = "
      + useRawLocalFileSystem);
}
 
Example #19
Source File: HDFSStorageTest.java    From attic-apex-malhar with Apache License 2.0
private HDFSStorage getStorage(String id, boolean restore)
{
  Context ctx = new Context();
  STORAGE_DIRECTORY = testMeta.baseDir;
  ctx.put(HDFSStorage.BASE_DIR_KEY, testMeta.baseDir);
  ctx.put(HDFSStorage.RESTORE_KEY, Boolean.toString(restore));
  ctx.put(HDFSStorage.ID, id);
  ctx.put(HDFSStorage.BLOCKSIZE, "256");
  HDFSStorage lstorage = new HDFSStorage();
  lstorage.configure(ctx);
  lstorage.setup(null);
  return lstorage;
}
 
Example #20
Source File: KafkaSourceUtil.java    From flume-ng-extends-source with MIT License
public static Properties getKafkaProperties(Context context) {
  log.info("context={}",context.toString());
  Properties props =  generateDefaultKafkaProps();
  setKafkaProps(context,props);
  addDocumentedKafkaProps(context,props);
  return props;
}
 
Example #21
Source File: AvroEventDeserializer.java    From mt-flume with Apache License 2.0
@Override
public EventDeserializer build(Context context, ResettableInputStream in) {
  if (!(in instanceof RemoteMarkable)) {
    throw new IllegalArgumentException("Cannot use this deserializer " +
        "without a RemoteMarkable input stream");
  }
  AvroEventDeserializer deserializer
      = new AvroEventDeserializer(context, in);
  try {
    deserializer.initialize();
  } catch (Exception e) {
    throw new FlumeException("Cannot instantiate deserializer", e);
  }
  return deserializer;
}
 
Example #22
Source File: FlumeEventAvroEventDeserializer.java    From mt-flume with Apache License 2.0
@Override
public EventDeserializer build(Context context, ResettableInputStream in) {
  if (!(in instanceof RemoteMarkable)) {
    throw new IllegalArgumentException("Cannot use this deserializer " +
        "without a RemoteMarkable input stream");
  }
  FlumeEventAvroEventDeserializer deserializer
      = new FlumeEventAvroEventDeserializer(context, in);
  try {
    deserializer.initialize();
  } catch (Exception e) {
    throw new FlumeException("Cannot instantiate deserializer", e);
  }
  return deserializer;
}
 
Example #23
Source File: JavaLogAvroEventSerializer.java    From flume-plugins with MIT License
@Override
public EventSerializer build(Context context, OutputStream out) {
    JavaLogAvroEventSerializer writer = null;
    try {
        writer = new JavaLogAvroEventSerializer(out);
        writer.configure(context);
    } catch (IOException e) {
        log.error("Unable to parse schema file. Exception follows.", e);
    }
    return writer;
}
 
Example #24
Source File: PhoenixSinkIT.java    From phoenix with Apache License 2.0
@Test(expected=IllegalArgumentException.class)
public void testInvalidConfigurationOfSerializer() {

    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, "csv");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example #25
Source File: MongoSinkDynamicTest.java    From ingestion with Apache License 2.0
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Example #26
Source File: TestElasticSearchSink.java    From ingestion with Apache License 2.0
@Ignore @Test
public void shouldParseMultipleHostWithWhitespacesUsingDefaultPorts() {
  parameters.put(HOSTNAMES, " 10.5.5.27 , 10.5.5.28 , 10.5.5.29 ");

  fixture = new ElasticSearchSink();
  fixture.configure(new Context(parameters));

  String[] expected = { "10.5.5.27", "10.5.5.28", "10.5.5.29" };

  assertArrayEquals(expected, fixture.getServerAddresses());
}
 
Example #27
Source File: TestPhoenixSink.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Test(expected = NullPointerException.class)
public void testInvalidConfiguration() {

    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());

    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
}
 
Example #28
Source File: TestSourceConfiguration.java    From mt-flume with Apache License 2.0
/**
 * Test fails without FLUME-1847
 */
@Test(expected = ConfigurationException.class)
public void testFLUME1847() throws Exception {
  Context context = new Context();
  context.put("type", "something");
  SourceConfiguration sourceConfig = new SourceConfiguration("src");
  sourceConfig.configure(context);

}
 
Example #29
Source File: NGSICKANSinkTest.java    From fiware-cygnus with GNU Affero General Public License v3.0
private Context createContextforNativeTypes(String backendImpl, String backendMaxConns, String backendMaxConnsPerRoute,
                                            String batchSize, String batchTime, String batchTTL, String csvSeparator, String dataModel,
                                            String enableEncoding, String enableGrouping, String enableLowercase, String fileFormat, String host,
                                            String password, String port, String username, String hive, String krb5, String token,
                                            String serviceAsNamespace, String attrNativeTypes, String metadata) {
    Context context = new Context();
    context.put("backend.impl", backendImpl);
    context.put("backend.max_conns", backendMaxConns);
    context.put("backend.max_conns_per_route", backendMaxConnsPerRoute);
    context.put("batchSize", batchSize);
    context.put("batchTime", batchTime);
    context.put("batchTTL", batchTTL);
    context.put("csv_separator", csvSeparator);
    context.put("data_model", dataModel);
    context.put("enable_encoding", enableEncoding);
    context.put("enable_grouping", enableGrouping);
    context.put("enable_grouping", enableLowercase);
    context.put("file_format", fileFormat);
    context.put("hdfs_host", host);
    context.put("hdfs_password", password);
    context.put("hdfs_port", port);
    context.put("hdfs_username", username);
    context.put("hive", hive);
    context.put("krb5_auth", krb5);
    context.put("oauth2_token", token);
    context.put("service_as_namespace", serviceAsNamespace);
    context.put("attr_native_types", attrNativeTypes);
    context.put("attr_metadata_store", metadata);
    return context;
}
 
Example #30
Source File: RedisSourceTest.java    From ingestion with Apache License 2.0 5 votes vote down vote up
@Before
public void setUp() {
    source = new RedisSource();
    channel = new MemoryChannel();

    Configurables.configure(channel, new Context());

    List<Channel> channels = new ArrayList<Channel>();
    channels.add(channel);

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);

    source.setChannelProcessor(new ChannelProcessor(rcs));
}