com.amazonaws.services.logs.model.InputLogEvent Java Examples

The following examples show how to use com.amazonaws.services.logs.model.InputLogEvent. Each example lists its original source file, the project it comes from, and that project's license.
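Before the individual examples, here is a minimal, self-contained sketch of how InputLogEvent is typically used with the AWS SDK for Java v1: build each event with a timestamp and a message, collect the events into a batch in chronological order, and send the batch with a PutLogEventsRequest. The log group and stream names below are placeholders and error handling is omitted.

import java.util.Arrays;

import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.AWSLogsClientBuilder;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;

public class PutLogEventsSketch {
    public static void main(String[] args) {
        AWSLogs client = AWSLogsClientBuilder.defaultClient();

        // Each event carries an epoch-millisecond timestamp and a message string.
        InputLogEvent event = new InputLogEvent()
                .withTimestamp(System.currentTimeMillis())
                .withMessage("hello from InputLogEvent");

        // Events in a single request must be in chronological order.
        // For a stream that already has events, withSequenceToken(...) may also be required.
        PutLogEventsRequest request = new PutLogEventsRequest()
                .withLogGroupName("my-log-group")    // placeholder
                .withLogStreamName("my-log-stream")  // placeholder
                .withLogEvents(Arrays.asList(event));

        PutLogEventsResult result = client.putLogEvents(request);
        System.out.println("Next sequence token: " + result.getNextSequenceToken());
    }
}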
Example #1
Source File: CloudWatchAppender.java    From cloudwatch-logback-appender with ISC License
// Orders events by timestamp, with null timestamps sorting first;
// CloudWatch expects the events in a batch to be in chronological order.
@Override
public int compare(InputLogEvent o1, InputLogEvent o2) {
	if (o1.getTimestamp() == null) {
		if (o2.getTimestamp() == null) {
			return 0;
		} else {
			// null - long
			return -1;
		}
	} else if (o2.getTimestamp() == null) {
		// long - null
		return 1;
	} else {
		return o1.getTimestamp().compareTo(o2.getTimestamp());
	}
}
 
Example #2
Source File: CloudwatchLogsLogEventPutter.java    From cloudwatchlogs-java-appender with Apache License 2.0
private void flush() {
    // CloudWatch expects events in chronological order, so sort the batch by timestamp first.
    Collections.sort(eventBatch, new Comparator<InputLogEvent>() {
        @Override
        public int compare(InputLogEvent o1, InputLogEvent o2) {
            return o1.getTimestamp().compareTo(o2.getTimestamp());
        }
    });

    if (enabled) {
        processedCount.addAndGet(doFlush());
    } else if (config.isStdoutFallback()) {
        for (InputLogEvent event : eventBatch) {
            printWithTimestamp(new Date(event.getTimestamp()), logGroupName + " " + app + " " + event.getMessage());
        }
        processedCount.addAndGet(eventBatch.size());
    }

    eventBatch = new ArrayList<>();
    batchSize = 0;
    lastFlush = System.nanoTime();
}
 
Example #3
Source File: AsyncWorker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
// Drains queued events in batches and sends them to CloudWatch,
// reporting how many events were discarded since the last flush.
private void flush(boolean all) {
    try {
        long lostCount = this.lostCount.getAndSet(0);
        if (lostCount > 0) {
            getAwsLogsAppender().addWarn(lostCount + " events lost");
        }
        if (!queue.isEmpty()) {
            do {
                Collection<InputLogEvent> batch = drainBatchFromQueue();
                getAwsLogsAppender().getAwsLogsStub().logEvents(batch);
            } while (queue.size() >= maxBatchLogEvents || (all && !queue.isEmpty()));
        }
    } catch (Exception e) {
        getAwsLogsAppender().addError("Unable to flush events to AWS", e);
    }
}
 
Example #4
Source File: Worker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
// Converts a Logback event into an InputLogEvent, trimming the message
// if it would exceed CloudWatch's per-event size limit.
InputLogEvent asInputLogEvent(ILoggingEvent event) {
    InputLogEvent inputLogEvent = new InputLogEvent().withTimestamp(event.getTimeStamp())
            .withMessage(awsLogsAppender.getLayout().doLayout(event));

    if (eventSize(inputLogEvent) > MAX_EVENT_SIZE) {
        awsLogsAppender
                .addWarn(String.format("Log message exceeded Cloudwatch Log's limit of %d bytes", MAX_EVENT_SIZE));
        trimMessage(inputLogEvent, MAX_EVENT_SIZE);
    }

    return inputLogEvent;
}
 
Example #5
Source File: Worker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
private static final void trimMessage(InputLogEvent event, int eventSize) {
    int trimmedMessageSize = eventSize - EVENT_SIZE_PADDING - ELLIPSIS.getBytes(EVENT_SIZE_CHARSET).length;
    byte[] message = event.getMessage().getBytes(EVENT_SIZE_CHARSET);

    String unsafeTrimmed = new String(message, 0, trimmedMessageSize + 1, EVENT_SIZE_CHARSET);
    // The last character might be a chopped UTF-8 character
    String trimmed = unsafeTrimmed.substring(0, unsafeTrimmed.length() - 1);

    event.setMessage(trimmed + ELLIPSIS);
}
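The constants referenced above (ELLIPSIS, EVENT_SIZE_CHARSET, EVENT_SIZE_PADDING) are defined elsewhere in Worker.java; the trimming operates on UTF-8 bytes, which is why the last decoded character is dropped. As a standalone, hypothetical illustration (the class and strings below are not part of the appender), cutting a UTF-8 byte array in the middle of a multibyte character produces a replacement character that has to be discarded:

import java.nio.charset.StandardCharsets;

public class Utf8TrimSketch {
    public static void main(String[] args) {
        byte[] bytes = "abcö".getBytes(StandardCharsets.UTF_8); // 'ö' encodes to two bytes
        // Decoding one byte short of the full array splits 'ö', yielding U+FFFD.
        String unsafeTrimmed = new String(bytes, 0, bytes.length - 1, StandardCharsets.UTF_8);
        System.out.println(unsafeTrimmed.length()); // 4: "abc" plus the replacement character
        // Dropping the last decoded character removes the possibly broken one.
        String trimmed = unsafeTrimmed.substring(0, unsafeTrimmed.length() - 1);
        System.out.println(trimmed + "..."); // prints abc...
    }
}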
 
Example #6
Source File: AsyncWorker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
AsyncWorker(AwsLogsAppender awsLogsAppender) {
    super(awsLogsAppender);
    maxBatchLogEvents = awsLogsAppender.getMaxBatchLogEvents();
    discardThreshold = (int) Math.ceil(maxBatchLogEvents * 1.5);
    running = new AtomicBoolean(false);
    queue = new ArrayBlockingQueue<InputLogEvent>(maxBatchLogEvents * 2);
    lostCount = new AtomicLong(0);
}
 
Example #7
Source File: AsyncWorker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
// Drains up to MAX_BATCH_LOG_EVENTS events from the queue, then drops events from
// the tail (requeueing them) until the batch fits within MAX_BATCH_SIZE bytes.
private Collection<InputLogEvent> drainBatchFromQueue() {
    Deque<InputLogEvent> batch = new ArrayDeque<InputLogEvent>(maxBatchLogEvents);
    queue.drainTo(batch, MAX_BATCH_LOG_EVENTS);
    int batchSize = batchSize(batch);
    while (batchSize > MAX_BATCH_SIZE) {
        InputLogEvent removed = batch.removeLast();
        batchSize -= eventSize(removed);
        if (!queue.offer(removed)) {
            getAwsLogsAppender().addWarn("Failed requeing message from too big batch");
        }
    }
    return batch;
}
 
Example #8
Source File: AsyncWorker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
private static int batchSize(Collection<InputLogEvent> batch) {
    int size = 0;
    for (InputLogEvent event : batch) {
        size += eventSize(event);
    }
    return size;
}
 
Example #9
Source File: CloudWatchAppenderTest.java    From cloudwatch-logback-appender with ISC License
@Test(timeout = 5000)
public void testBasic() throws InterruptedException {
	CloudWatchAppender appender = new CloudWatchAppender();
	AWSLogsClient awsLogClient = createMock(AWSLogsClient.class);
	appender.setAwsLogsClient(awsLogClient);

	appender.setMaxBatchSize(1);
	appender.setRegion("region");
	final String logGroup = "pfqoejpfqe";
	appender.setLogGroup(logGroup);
	final String logStream = "pffqjfqjpoqoejpfqe";
	appender.setLogStream(logStream);
	PatternLayout layout = new PatternLayout();
	layout.setContext(new LoggerContext());
	layout.setPattern("[%thread] %level %logger{20} - %msg%n%xThrowable");
	layout.start();
	appender.setLayout(layout);

	LoggingEvent event = new LoggingEvent();
	event.setTimeStamp(System.currentTimeMillis());
	String loggerName = "name";
	event.setLoggerName(loggerName);
	Level level = Level.DEBUG;
	event.setLevel(level);
	String message = "fjpewjfpewjfpewjfepowf";
	event.setMessage(message);

	String threadName = Thread.currentThread().getName();
	final String fullMessage = "[" + threadName + "] " + level + " " + loggerName + " - " + message + "\n";

	final PutLogEventsResult result = new PutLogEventsResult();
	String sequence = "ewopjfewfj";
	result.setNextSequenceToken(sequence);
	expect(awsLogClient.putLogEvents(isA(PutLogEventsRequest.class))).andAnswer(new IAnswer<PutLogEventsResult>() {
		@Override
		public PutLogEventsResult answer() {
			PutLogEventsRequest request = (PutLogEventsRequest) getCurrentArguments()[0];
			assertEquals(logGroup, request.getLogGroupName());
			assertEquals(logStream, request.getLogStreamName());
			List<InputLogEvent> events = request.getLogEvents();
			assertEquals(1, events.size());
			assertEquals(fullMessage, events.get(0).getMessage());
			return result;
		}
	}).times(2);
	awsLogClient.shutdown();

	// =====================================

	replay(awsLogClient);
	appender.start();
	// for coverage
	appender.start();
	appender.append(event);
	Thread.sleep(10);
	appender.append(event);
	while (appender.getEventsWrittenCount() < 2) {
		Thread.sleep(10);
	}
	appender.stop();
	verify(awsLogClient);
}
 
Example #10
Source File: Worker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
static final int eventSize(InputLogEvent event) {
    return event.getMessage().getBytes(EVENT_SIZE_CHARSET).length + EVENT_SIZE_PADDING;
}
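EVENT_SIZE_PADDING corresponds to the fixed 26-byte per-event overhead that CloudWatch Logs adds when it computes sizes; the same 26 bytes and the 262,144-byte per-event limit appear literally in Example #16. PutLogEvents also caps the total size of a batch (1,048,576 bytes, counted the same way), which is why Example #7 keeps removing events from a drained batch until it fits under MAX_BATCH_SIZE.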
 
Example #11
Source File: AsyncWorker.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
@Override
public void append(ILoggingEvent event) {
    // don't log if discardThreshold is met and event is not important (< WARN)
    if (queue.size() >= discardThreshold && !event.getLevel().isGreaterOrEqual(Level.WARN)) {
        lostCount.incrementAndGet();
        synchronized (running) {
            running.notifyAll();
        }
        return;
    }
    InputLogEvent logEvent = asInputLogEvent(event);
    // are we allowed to block ?
    if (getAwsLogsAppender().getMaxBlockTimeMillis() > 0) {
        // we are allowed to block, offer uninterruptibly for the configured maximum blocking time
        boolean interrupted = false;
        long until = System.currentTimeMillis() + getAwsLogsAppender().getMaxBlockTimeMillis();
        try {
            long now = System.currentTimeMillis();
            while (now < until) {
                try {
                    if (!queue.offer(logEvent, until - now, TimeUnit.MILLISECONDS)) {
                        lostCount.incrementAndGet();
                    }
                    break;
                } catch (InterruptedException e) {
                    interrupted = true;
                    now = System.currentTimeMillis();
                }
            }
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    } else {
        // we are not allowed to block, offer without blocking
        if (!queue.offer(logEvent)) {
            lostCount.incrementAndGet();
        }
    }
    // trigger a flush if queue is full
    if (queue.size() >= maxBatchLogEvents) {
        synchronized (running) {
            running.notifyAll();
        }
    }
}
 
Example #12
Source File: AsyncWorkerTest.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
private static Collection<InputLogEvent> anyInputLogEvents() {
    return anyCollection();
}
 
Example #13
Source File: WorkerTest.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
@Theory
public void eventShouldNotBeTrimmed(@FromDataPoints("UNTRIMMED") String message) {
    InputLogEvent event = worker.asInputLogEvent(asEvent(message));
    assertFalse(event.getMessage().endsWith("..."));
}
 
Example #14
Source File: WorkerTest.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
@Theory
public void eventShouldBeTrimmed(@FromDataPoints("TRIMMED") String message) {
    InputLogEvent event = worker.asInputLogEvent(asEvent(message));
    assertTrue(event.getMessage().endsWith("..."));
}
 
Example #15
Source File: WorkerTest.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
@Theory
public void trimmingShouldNotChopMultibyteCharacter(@FromDataPoints("TRIMMED_MB") String message) {
    InputLogEvent event = worker.asInputLogEvent(asEvent(message));
    assertTrue(event.getMessage().endsWith("ö..."));
}
 
Example #16
Source File: WorkerTest.java    From logback-awslogs-appender with GNU Lesser General Public License v3.0
@Theory
public void eventShouldNeverExceed262144Bytes(String message) throws UnsupportedEncodingException {
    InputLogEvent event = worker.asInputLogEvent(asEvent(message));
    int eventSize = event.getMessage().getBytes("UTF-8").length + 26;
    assertTrue(eventSize <= 262144);
}