Java Code Examples for org.apache.flume.conf.Configurables#configure()

The following examples show how to use org.apache.flume.conf.Configurables#configure(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
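
Before diving into the project examples, here is a minimal, self-contained sketch of the pattern they all share: build a Context from string key/value properties, then let Configurables.configure() apply it to any Configurable component (channel, source, sink, or channel selector). The class name and property values below are illustrative assumptions, not requirements of the API.

import java.util.HashMap;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

public class ConfigurablesSketch {
    public static void main(String[] args) {
        MemoryChannel channel = new MemoryChannel();

        // Flume configuration is string-keyed; each component parses its own values.
        Map<String, String> params = new HashMap<>();
        params.put("capacity", "10000");          // illustrative value
        params.put("transactionCapacity", "100"); // illustrative value

        // Configurables.configure() checks that the target implements Configurable
        // and delegates to its configure(Context) method. Components typically
        // validate their settings there and throw on bad input, as the Kerberos
        // example below shows.
        Configurables.configure(channel, new Context(params));

        channel.start();
        // ... put/take events inside transactions ...
        channel.stop();
    }
}
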
Example 1
Source File: FlumeAgentServiceImpl.java    From searchanalytics-bigdata with MIT License
private void createSparkAvroSink() {
	sparkAvroChannel = new MemoryChannel();
	Map<String, String> channelParameters = new HashMap<>();
	channelParameters.put("capacity", "100000");
	channelParameters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParameters);
	Configurables.configure(sparkAvroChannel, channelContext);
	String channelName = "SparkAvroMemoryChannel-" + UUID.randomUUID();
	sparkAvroChannel.setName(channelName);

	sparkAvroSink = new AvroSink();
	sparkAvroSink.setName("SparkAvroSink-" + UUID.randomUUID());
	Map<String, String> parameters = new HashMap<>();
	parameters.put("type", "avro");
	parameters.put("hostname", "localhost");
	parameters.put("port", "41111");
	parameters.put("batch-size", "100");
	Context sinkContext = new Context(parameters);
	// Configurables.configure() delegates to the sink's configure(Context),
	// so a separate direct call to sparkAvroSink.configure() is unnecessary.
	Configurables.configure(sparkAvroSink, sinkContext);
	sparkAvroSink.setChannel(sparkAvroChannel);

	// Start the channel before the sink that drains it.
	sparkAvroChannel.start();
	sparkAvroSink.start();
}
 
Example 2
Source File: TestMemoryChannel.java    From mt-flume with Apache License 2.0
@Test
public void testCapacityBufferEmptyingAfterRollback() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("capacity", "3");
  parms.put("transactionCapacity", "3");
  context.putAll(parms);
  Configurables.configure(channel, context);

  Transaction tx = channel.getTransaction();
  tx.begin();
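  // Fill the channel to its capacity of 3, then roll back.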
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.rollback();
  tx.close();

  tx = channel.getTransaction();
  tx.begin();
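  // After the rollback, the freed capacity must admit a full batch of 3.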
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.commit();
  tx.close();
}
 
Example 3
Source File: SNMPSourceTestIT.java    From ingestion with Apache License 2.0
@Test
@Ignore
public void testV1NoAuth() throws InterruptedException, IOException {
    Context context = new Context();
    context.put(CONF_TRAP_PORT, STMP_TRAP_PORT);
    context.put(CONF_SNMP_TRAP_VERSION, "V1");

    Configurables.configure(source, context);
    source.start();

    // Keep sending V1 traps until the source has accepted at least one event.
    while (source.getSourceCounter().getEventAcceptedCount() < 1) {
        SNMPUtils.sendTrapV1(STMP_TRAP_PORT);
        Thread.sleep(10);
    }

    checkEventsChannel();
}
 
Example 4
Source File: TestSpoolDirectorySource.java    From mt-flume with Apache License 2.0
@Before
public void setUp() {
  source = new SpoolDirectorySource();
  channel = new MemoryChannel();

  Configurables.configure(channel, new Context());

  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);

  // Replicate every event to the single test channel.
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));
  tmpDir = Files.createTempDir();
}
 
Example 5
Source File: TestReplicatingChannelSelector.java    From mt-flume with Apache License 2.0
@Test
public void testOptionalChannels() throws Exception {
  Context context = new Context();
  context.put(ReplicatingChannelSelector.CONFIG_OPTIONAL, "ch1");
  Configurables.configure(selector, context);
  // With ch1 marked optional, only ch2, ch3 and ch4 remain required.
  List<Channel> channels = selector.getRequiredChannels(new MockEvent());
  Assert.assertNotNull(channels);
  Assert.assertEquals(3, channels.size());
  Assert.assertEquals("ch2", channels.get(0).getName());
  Assert.assertEquals("ch3", channels.get(1).getName());
  Assert.assertEquals("ch4", channels.get(2).getName());

  List<Channel> optCh = selector.getOptionalChannels(new MockEvent());
  Assert.assertEquals(1, optCh.size());
  Assert.assertEquals("ch1", optCh.get(0).getName());

}
 
Example 6
Source File: TestMemoryChannel.java    From mt-flume with Apache License 2.0
@Test
public void testNullEmptyEvent() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("byteCapacity", "2000");
  parms.put("byteCapacityBufferPercentage", "20");
  context.putAll(parms);
  Configurables.configure(channel, context);

  Transaction tx = channel.getTransaction();
  tx.begin();
  //This line would cause a NPE without FLUME-1622.
  channel.put(EventBuilder.withBody(null));
  tx.commit();
  tx.close();

  tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody(new byte[0]));
  tx.commit();
  tx.close();
}
 
Example 7
Source File: TestThriftSource.java    From mt-flume with Apache License 2.0
@Test
public void testAppend() throws Exception {
  client = RpcClientFactory.getThriftInstance(props);
  Context context = new Context();
  channel.configure(context);
  configureSource();
  context.put(ThriftSource.CONFIG_BIND, "0.0.0.0");
  context.put(ThriftSource.CONFIG_PORT, String.valueOf(port));
  Configurables.configure(source, context);
  source.start();
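  // Append 30 events through the Thrift RPC client.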
  for(int i = 0; i < 30; i++) {
    client.append(EventBuilder.withBody(String.valueOf(i).getBytes()));
  }
  Transaction transaction = channel.getTransaction();
  transaction.begin();

  // Drain the channel and verify the 30 events arrive in order.
  for (int i = 0; i < 30; i++) {
    Event event = channel.take();
    Assert.assertNotNull(event);
    Assert.assertEquals(String.valueOf(i), new String(event.getBody()));
  }
  transaction.commit();
  transaction.close();
}
 
Example 8
Source File: FlumeHDFSSinkServiceImpl.java    From searchanalytics-bigdata with MIT License
private void createSink() {
	sink = new HDFSEventSink();
	sink.setName("HDFSEventSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParameters = new HashMap<>();
	channelParameters.put("capacity", "100000");
	channelParameters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParameters);
	Configurables.configure(channel, channelContext);
	channel.setName("HDFSEventSinkChannel-" + UUID.randomUUID());

	Map<String, String> parameters = new HashMap<>();
	parameters.put("hdfs.type", "hdfs");
	String hdfsBasePath = hadoopClusterService.getHDFSUri()
			+ "/searchevents";
	parameters.put("hdfs.path", hdfsBasePath + "/%Y/%m/%d/%H");
	parameters.put("hdfs.filePrefix", "searchevents");
	parameters.put("hdfs.fileType", "DataStream");
	// Disable size-, time- and count-based rolling; roll on the 1s idle timeout instead.
	parameters.put("hdfs.rollInterval", "0");
	parameters.put("hdfs.rollSize", "0");
	parameters.put("hdfs.idleTimeout", "1");
	parameters.put("hdfs.rollCount", "0");
	parameters.put("hdfs.batchSize", "1000");
	parameters.put("hdfs.useLocalTimeStamp", "true");

	Context sinkContext = new Context(parameters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	// Start the channel before the sink that drains it.
	channel.start();
	sink.start();
}
 
Example 9
Source File: FlumeESSinkServiceImpl.java    From searchanalytics-bigdata with MIT License
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParameters = new HashMap<>();
	channelParameters.put("capacity", "100000");
	channelParameters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParameters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> parameters = new HashMap<>();
	parameters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	parameters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	parameters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	parameters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	parameters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	// Serialize event bodies with a custom JSON serializer.
	parameters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(parameters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	// Start the channel before the sink that drains it.
	channel.start();
	sink.start();
}
 
Example 10
Source File: TestSequenceGeneratorSource.java    From mt-flume with Apache License 2.0
@Test
public void testBatchProcessWithLifeCycle() throws InterruptedException, LifecycleException,
    EventDeliveryException {

  int batchSize = 10;

  Channel channel = new PseudoTxnMemoryChannel();
  Context context = new Context();

  context.put("logicalNode.name", "test");
  context.put("batchSize", Integer.toString(batchSize));

  Configurables.configure(source, context);
  Configurables.configure(channel, context);

  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  source.start();

  for (long i = 0; i < 100; i++) {
    source.process();

    // Each process() call emits batchSize sequential values; check them in order.
    for (long j = batchSize; j > 0; j--) {
      Event event = channel.take();
      String expectedVal = String.valueOf(((i+1)*batchSize)-j);
      String resultedVal = new String(event.getBody());
      Assert.assertTrue("Expected " + expectedVal + " is not equals to " +
          resultedVal, expectedVal.equals(resultedVal));
    }
  }

  source.stop();
}
 
Example 11
Source File: TestElasticSearchSink.java    From mt-flume with Apache License 2.0
@Test
public void shouldIndexFiveEvents() throws Exception {
  // Make it so we only need to call process once
  parameters.put(BATCH_SIZE, "5");
  Configurables.configure(fixture, new Context(parameters));
  Channel channel = bindAndStartChannel(fixture);

  int numberOfEvents = 5;
  Event[] events = new Event[numberOfEvents];

  Transaction tx = channel.getTransaction();
  tx.begin();
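  // Stage all five events in one transaction so a single process() call indexes them.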
  for (int i = 0; i < numberOfEvents; i++) {
    String body = "event #" + i + " of " + numberOfEvents;
    Event event = EventBuilder.withBody(body.getBytes());
    events[i] = event;
    channel.put(event);
  }
  tx.commit();
  tx.close();

  fixture.process();
  fixture.stop();
  client.admin().indices()
      .refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

  assertMatchAllQuery(numberOfEvents, events);
  assertBodyQuery(5, events);
}
 
Example 12
Source File: TestHDFSEventSink.java    From mt-flume with Apache License 2.0
@Test
public void testKerbFileAccess() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {
  LOG.debug("Starting testKerbFileAccess() ...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  String kerbConfPrincipal = "user1/[email protected]";
  String kerbKeytab = "/usr/lib/flume/nonexistkeytabfile";

  //turn security on
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.kerberosPrincipal", kerbConfPrincipal);
  context.put("hdfs.kerberosKeytab", kerbKeytab);

  try {
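    // Configuring a sink with a nonexistent keytab must fail fast.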
    Configurables.configure(sink, context);
    Assert.fail("no exception thrown");
  } catch (IllegalArgumentException expected) {
    Assert.assertTrue(expected.getMessage().contains(
        "is nonexistent or can't read."));
  } finally {
    //turn security off
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "simple");
    UserGroupInformation.setConfiguration(conf);
  }
}
 
Example 13
Source File: FlumeThriftService.java    From bahir-flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    //Flume Source
    ThriftSource source = new ThriftSource();
    Channel ch = new MemoryChannel();
    Configurables.configure(ch, new Context());

    Context context = new Context();
    context.put("port", String.valueOf(port));
    context.put("bind", hostname);
    Configurables.configure(source, context);

    List<Channel> channels = new ArrayList<>();
    channels.add(ch);
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);
    source.setChannelProcessor(new ChannelProcessor(rcs));
    source.start();
    System.out.println("ThriftSource service start.");

    while (true) {
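        // Poll the channel forever inside transactions, printing any event that arrives.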
        Transaction transaction = ch.getTransaction();
        transaction.begin();
        Event event = ch.take();
        if (null != event) {
            System.out.println(event);
            System.out.println(new String(event.getBody()).trim());
        }
        transaction.commit();
        transaction.close();
    }

}
 
Example 14
Source File: TestHBaseSink.java    From mt-flume with Apache License 2.0
@Test
public void testMultipleBatches() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  ctx.put("batchSize", "2");
  HBaseSink sink = new HBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  //Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for(int i = 0; i < 3; i++){
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  int count = 0;
  Status status = Status.READY;
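  // With batchSize=2 and 3 staged events, the second process() drains the channel and returns BACKOFF.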
  while(status != Status.BACKOFF){
    count++;
    status = sink.process();
  }
  sink.stop();
  Assert.assertEquals(2, count);
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for(int i = 0; i < 3; i++){
    for(int j = 0; j < 3; j++){
      if(Arrays.equals(results[j],Bytes.toBytes(valBase + "-" + i))){
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
  testUtility.deleteTable(tableName.getBytes());
}
 
Example 15
Source File: RegexEventSerializerIT.java    From phoenix with Apache License 2.0
@Test
public void testApacheLogRegex() throws Exception {
    
    sinkContext = new Context();
    final String fullTableName = "s1.apachelogs";
    final String logRegex = "([^ ]*) ([^ ]*) ([^ ]*) (-|\\[[^\\]]*\\]) \"([^ ]+) ([^ ]+)" +
                            " ([^\"]+)\" (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\"[^\"]*\")" +
                            " ([^ \"]*|\"[^\"]*\"))?";
    
    final String columns = "host,identity,user,time,method,request,protocol,status,size,referer,agent";
    
    String ddl = "CREATE TABLE " + fullTableName +
            "  (uid VARCHAR NOT NULL, user VARCHAR, time varchar, host varchar , identity varchar, method varchar, request varchar , protocol varchar," +
            "  status integer , size integer , referer varchar , agent varchar CONSTRAINT pk PRIMARY KEY (uid))\n";
   
    sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,logRegex);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,columns);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.UUID.name());
   
    String message1 = "33.22.11.00 - user1 [12/Dec/2013:07:01:19 +0000] " +
            "\"GET /wp-admin/css/install.css HTTP/1.0\" 200 813 " + 
            "\"http://www.google.com\" \"Mozilla/5.0 (comp" +
            "atible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)\"";
    
    String message2 = "192.168.20.1 - user2 [13/Dec/2013:06:05:19 +0000] " +
            "\"GET /wp-admin/css/install.css HTTP/1.0\" 400 363 " + 
            "\"http://www.salesforce.com/in/?ir=1\" \"Mozilla/5.0 (comp" +
            "atible;)\"";
    
    
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
  
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    
    sink.start();
    
    final Event event1 = EventBuilder.withBody(Bytes.toBytes(message1));
    final Event event2 = EventBuilder.withBody(Bytes.toBytes(message2));
    
    final Transaction transaction = channel.getTransaction();
    transaction.begin();
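    // Stage both Apache access-log lines; the regex serializer parses them into rows.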
    channel.put(event1);
    channel.put(event2);
    transaction.commit();
    transaction.close();

    sink.process();
   
    final String query = " SELECT * FROM \n " + fullTableName;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    final ResultSet rs;
    final Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        rs = conn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertTrue(rs.next());
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    
}
 
Example 16
Source File: TestAvroSource.java    From mt-flume with Apache License 2.0
private void doRequest(boolean serverEnableCompression, boolean clientEnableCompression, int compressionLevel) throws InterruptedException, IOException {
  boolean bound = false;

  for (int i = 0; i < 100 && !bound; i++) {
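    // Try successive ports starting at 41414 until the source binds.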
    try {
      Context context = new Context();
      context.put("port", String.valueOf(selectedPort = 41414 + i));
      context.put("bind", "0.0.0.0");
      context.put("threads", "50");
      if (serverEnableCompression) {
        context.put("compression-type", "deflate");
      } else {
        context.put("compression-type", "none");
      }

      Configurables.configure(source, context);

      source.start();
      bound = true;
    } catch (ChannelException e) {
      /*
       * NB: This assumes we're using the Netty server under the hood and that
       * the failure is a failure to bind. Yucky.
       */
    }
  }

  Assert.assertTrue("Reached start or error", LifecycleController.waitForOneOf(
      source, LifecycleState.START_OR_ERROR));
  Assert.assertEquals("Server is started", LifecycleState.START,
      source.getLifecycleState());

  AvroSourceProtocol client;
  if (clientEnableCompression) {
    client = SpecificRequestor.getClient(
        AvroSourceProtocol.class, new NettyTransceiver(new InetSocketAddress(
            selectedPort), new CompressionChannelFactory(6)));
  } else {
    client = SpecificRequestor.getClient(
        AvroSourceProtocol.class, new NettyTransceiver(new InetSocketAddress(
            selectedPort)));
  }

  AvroFlumeEvent avroEvent = new AvroFlumeEvent();

  avroEvent.setHeaders(new HashMap<CharSequence, CharSequence>());
  avroEvent.setBody(ByteBuffer.wrap("Hello avro".getBytes()));

  Status status = client.append(avroEvent);

  Assert.assertEquals(Status.OK, status);

  Transaction transaction = channel.getTransaction();
  transaction.begin();

  Event event = channel.take();
  Assert.assertNotNull(event);
  Assert.assertEquals("Channel contained our event", "Hello avro",
      new String(event.getBody()));
  transaction.commit();
  transaction.close();

  logger.debug("Round trip event:{}", event);

  source.stop();
  Assert.assertTrue("Reached stop or error",
      LifecycleController.waitForOneOf(source, LifecycleState.STOP_OR_ERROR));
  Assert.assertEquals("Server is stopped", LifecycleState.STOP,
      source.getLifecycleState());
}
 
Example 17
Source File: TwitterSourceTest.java    From fiware-cygnus with GNU Affero General Public License v3.0
@Test
public void testBasic() throws Exception {
    System.out.println(getTestTraceHead("[TwitterSourceTest.basic]")
            + "-------- Start source.");
    Context context = new Context();
    context.put("consumerKey", consumerKey);
    context.put("consumerSecret", consumerSecret);
    context.put("accessToken", accessToken);
    context.put("accessTokenSecret", accessTokenSecret);
    context.put("maxBatchDurationMillis", "1000");

    TwitterSource source = new TwitterSource();
    source.configure(context);

    Map<String, String> channelContext = new HashMap<>();
    channelContext.put("capacity", "1000000");
    channelContext.put("keep-alive", "0"); // for faster tests
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context(channelContext));

    Sink sink = new LoggerSink();
    sink.setChannel(channel);
    sink.start();
    DefaultSinkProcessor proc = new DefaultSinkProcessor();
    proc.setSinks(Collections.singletonList(sink));
    SinkRunner sinkRunner = new SinkRunner(proc);
    sinkRunner.start();

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(Collections.singletonList(channel));
    ChannelProcessor chp = new ChannelProcessor(rcs);
    source.setChannelProcessor(chp);

    try {
        source.start();

        Thread.sleep(500);
        source.stop();
        System.out.println(getTestTraceHead("[TwitterSourceTest.basic]")
                + "-  OK  - Twitter source started properly.");
    } catch (AssertionError e) {
        System.out.println(getTestTraceHead("[TwitterSourceTest.basic]")
                + "- FAIL - Twitter source could not start.");
        throw e;
    } // try catch
    sinkRunner.stop();
    sink.stop();
}
 
Example 18
Source File: TestHDFSEventSink.java    From mt-flume with Apache License 2.0
/**
 * Ensure that when a write throws an IOException we are
 * able to continue to make progress on the next process() call.
 * This relies on transactional rollback semantics for durability and
 * on the BucketWriter behavior of close()ing upon IOException.
 */
@Test
public void testCloseReopen() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final int numBatches = 4;
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  int i = 1, j = 1;

  HDFSBadWriterFactory badWriterFactory = new HDFSBadWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSBadWriterFactory.BadSequenceFileType);

  Configurables.configure(sink, context);

  MemoryChannel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    channel.getTransaction().begin();
    try {
      for (j = 1; j <= batchSize; j++) {
        Event event = new SimpleEvent();
        eventDate.clear();
        eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
        event.getHeaders().put("timestamp",
            String.valueOf(eventDate.getTimeInMillis()));
        event.getHeaders().put("hostname", "Host" + i);
        String body = "Test." + i + "." + j;
        event.setBody(body.getBytes());
        bodies.add(body);
        // inject fault
        event.getHeaders().put("fault-until-reopen", "");
        channel.put(event);
      }
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    LOG.info("execute sink to process the events: " + sink.process());
  }
  LOG.info("clear any events pending due to errors: " + sink.process());
  sink.stop();

  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Example 19
Source File: TestAvroSource.java    From mt-flume with Apache License 2.0
@Test
public void testSslRequest() throws InterruptedException, IOException {
  boolean bound = false;

  for (int i = 0; i < 10 && !bound; i++) {
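    // Try successive ports starting at 41414 until the SSL-enabled source binds.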
    try {
      Context context = new Context();

      context.put("port", String.valueOf(selectedPort = 41414 + i));
      context.put("bind", "0.0.0.0");
      context.put("ssl", "true");
      context.put("keystore", "src/test/resources/server.p12");
      context.put("keystore-password", "password");
      context.put("keystore-type", "PKCS12");

      Configurables.configure(source, context);

      source.start();
      bound = true;
    } catch (ChannelException e) {
      /*
       * NB: This assumes we're using the Netty server under the hood and that
       * the failure is a failure to bind. Yucky.
       */
      Thread.sleep(100);
    }
  }

  Assert.assertTrue("Reached start or error", LifecycleController.waitForOneOf(
      source, LifecycleState.START_OR_ERROR));
  Assert.assertEquals("Server is started", LifecycleState.START,
      source.getLifecycleState());

  AvroSourceProtocol client = SpecificRequestor.getClient(
      AvroSourceProtocol.class, new NettyTransceiver(new InetSocketAddress(
      selectedPort), new SSLChannelFactory()));

  AvroFlumeEvent avroEvent = new AvroFlumeEvent();

  avroEvent.setHeaders(new HashMap<CharSequence, CharSequence>());
  avroEvent.setBody(ByteBuffer.wrap("Hello avro ssl".getBytes()));

  Status status = client.append(avroEvent);

  Assert.assertEquals(Status.OK, status);

  Transaction transaction = channel.getTransaction();
  transaction.begin();

  Event event = channel.take();
  Assert.assertNotNull(event);
  Assert.assertEquals("Channel contained our event", "Hello avro ssl",
      new String(event.getBody()));
  transaction.commit();
  transaction.close();

  logger.debug("Round trip event:{}", event);

  source.stop();
  Assert.assertTrue("Reached stop or error",
      LifecycleController.waitForOneOf(source, LifecycleState.STOP_OR_ERROR));
  Assert.assertEquals("Server is stopped", LifecycleState.STOP,
      source.getLifecycleState());
}
 
Example 20
Source File: TestAvroSink.java    From mt-flume with Apache License 2.0
@Test
public void testSslSinkWithNonTrustedCert() throws InterruptedException,
    EventDeliveryException, InstantiationException, IllegalAccessException {
  setUp();
  Event event = EventBuilder.withBody("test event 1", Charsets.UTF_8);
  Server server = createSslServer(new MockAvroServer());

  server.start();

  Context context = new Context();

  context.put("hostname", hostname);
  context.put("port", String.valueOf(port));
  context.put("ssl", String.valueOf(true));
  context.put("batch-size", String.valueOf(2));
  context.put("connect-timeout", String.valueOf(2000L));
  context.put("request-timeout", String.valueOf(3000L));

  Configurables.configure(sink, context);

  sink.start();
  Assert.assertTrue(LifecycleController.waitForOneOf(sink,
      LifecycleState.START_OR_ERROR, 5000));

  Transaction transaction = channel.getTransaction();

  transaction.begin();
  for (int i = 0; i < 10; i++) {
    channel.put(event);
  }
  transaction.commit();
  transaction.close();

  boolean failed = false;
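  // process() should throw because the server presents an untrusted certificate;
  // any successful call sets 'failed', which triggers the Assert.fail below.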
  try {
    for (int i = 0; i < 5; i++) {
      sink.process();
      failed = true;
    }
  } catch (EventDeliveryException ex) {
    logger.info("Correctly failed to send event", ex);
  }

  sink.stop();
  Assert.assertTrue(LifecycleController.waitForOneOf(sink,
      LifecycleState.STOP_OR_ERROR, 5000));

  server.close();

  if (failed) {
    Assert.fail("SSL-enabled sink successfully connected to a server with an untrusted certificate when it should have failed");
  }
}