org.apache.distributedlog.DLSN Java Examples

The following examples show how to use org.apache.distributedlog.DLSN. Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
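Before the project examples, here is a minimal, self-contained sketch of the DLSN API as it is used in the snippets below: the (logSegmentSequenceNo, entryId, slotId) constructor, serialize()/deserialize(), getLogSegmentSequenceNo(), and the InitialDLSN/InvalidDLSN sentinels. The wrapper class DlsnRoundTripExample is illustrative only and not part of any project shown here.

import org.apache.distributedlog.DLSN;

public class DlsnRoundTripExample {
    public static void main(String[] args) {
        // A DLSN addresses a record by (log segment sequence number, entry id, slot id).
        DLSN dlsn = new DLSN(1L, 2L, 3L);

        // Serialize to a string (as the write proxy truncate() examples do) and back.
        String serialized = dlsn.serialize();
        DLSN restored = DLSN.deserialize(serialized);

        System.out.println("log segment = " + restored.getLogSegmentSequenceNo());
        System.out.println("round trip equal: " + dlsn.equals(restored));

        // Well-known sentinel positions that appear throughout the examples below.
        System.out.println("initial = " + DLSN.InitialDLSN); // start of the stream (see the reader examples)
        System.out.println("invalid = " + DLSN.InvalidDLSN); // e.g. returned for non-durable writes in Example #20
    }
}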
Example #1
Source File: TestDistributedLogServerBase.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testBulkWriteTotalFailureFirstWriteFailed() throws Exception {
    String name = String.format("dlserver-bulk-write-%s", "first-write-failed");

    dlClient.routingService.addHost(name, dlServer.getAddress());

    final int writeCount = 100;
    List<ByteBuffer> writes = new ArrayList<ByteBuffer>(writeCount + 1);
    ByteBuffer buf = ByteBuffer.allocate(MAX_LOGRECORD_SIZE + 1);
    writes.add(buf);
    for (long i = 1; i <= writeCount; i++) {
        writes.add(ByteBuffer.wrap(("" + i).getBytes()));
    }

    List<Future<DLSN>> futures = dlClient.dlClient.writeBulk(name, writes);
    validateFailedAsLogRecordTooLong(futures.get(0));
    Await.result(Futures.collect(futures.subList(1, writeCount + 1)));
}
 
Example #2
Source File: DLInputStreamTest.java    From pulsar with Apache License 2.0
/**
 * Test Case: read records from the input stream.
 */
@Test
public void testRead() throws Exception {
    DistributedLogManager dlm = mock(DistributedLogManager.class);
    LogReader reader = mock(LogReader.class);
    when(dlm.getInputStream(any(DLSN.class))).thenReturn(reader);

    byte[] data = "test-read".getBytes(UTF_8);
    LogRecordWithDLSN record = mock(LogRecordWithDLSN.class);
    when(record.getPayLoadInputStream())
        .thenReturn(new ByteArrayInputStream(data));
    when(reader.readNext(anyBoolean()))
        .thenReturn(record)
        .thenThrow(new EndOfStreamException("eos"));

    DLInputStream in = new DLInputStream(dlm);
    int numReads = 0;
    int readByte;
    while ((readByte = in.read()) != -1) {
        assertEquals(data[numReads], readByte);
        ++numReads;
    }
    assertEquals(data.length, numReads);
}
 
Example #3
Source File: ZKSubscriptionsStore.java    From distributedlog with Apache License 2.0
@Override
public CompletableFuture<Map<String, DLSN>> getLastCommitPositions() {
    final CompletableFuture<Map<String, DLSN>> result = new CompletableFuture<Map<String, DLSN>>();
    try {
        this.zkc.get().getChildren(this.zkPath, false, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(int rc, String path, Object ctx, List<String> children, Stat stat) {
                if (KeeperException.Code.NONODE.intValue() == rc) {
                    result.complete(new HashMap<String, DLSN>());
                } else if (KeeperException.Code.OK.intValue() != rc) {
                    result.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc), path));
                } else {
                    getLastCommitPositions(result, children);
                }
            }
        }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException zkce) {
        result.completeExceptionally(zkce);
    } catch (InterruptedException ie) {
        result.completeExceptionally(new DLInterruptedException("getLastCommitPositions was interrupted", ie));
    }
    return result;
}
 
Example #4
Source File: WriterWorker.java    From distributedlog with Apache License 2.0
@Override
public void run() {
    LOG.info("Started writer {}.", idx);
    while (running) {
        rateLimiter.getLimiter().acquire(batchSize);
        String streamName = streamNames.get(random.nextInt(numStreams));
        final long requestMillis = System.currentTimeMillis();
        final List<ByteBuffer> data = buildBufferList(batchSize, requestMillis, messageSizeBytes);
        if (null == data) {
            break;
        }
        List<Future<DLSN>> results = dlc.writeBulk(streamName, data);
        for (Future<DLSN> result : results) {
            result.addEventListener(new TimedRequestHandler(streamName, requestMillis));
        }
    }
    dlc.close();
}
 
Example #5
Source File: TestDistributedLogServerBase.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testBulkWriteEmptyBuffer() throws Exception {
    String name = String.format("dlserver-bulk-write-%s", "empty");

    dlClient.routingService.addHost(name, dlServer.getAddress());

    List<ByteBuffer> writes = new ArrayList<ByteBuffer>();
    writes.add(ByteBuffer.wrap(("").getBytes()));
    writes.add(ByteBuffer.wrap(("").getBytes()));
    List<Future<DLSN>> futures = dlClient.dlClient.writeBulk(name, writes);
    assertEquals(2, futures.size());
    for (Future<DLSN> future : futures) {
        // No throw == pass
        DLSN dlsn = Await.result(future, Duration.fromSeconds(10));
    }
}
 
Example #6
Source File: ClusterStateOpLog.java    From Elasticsearch with Apache License 2.0
public void truncateLogBeforeDLSN(final DLSN dlsn) {
    if (dlsn.getLogSegmentSequenceNo() <= truncatedDlsn.getLogSegmentSequenceNo()) {
        return;
    }
    this.logWriter.truncate(dlsn).addEventListener(new FutureEventListener<Boolean>() {

        @Override
        public void onFailure(Throwable t) {
            logger.error("errors while truncate log after DLSN [{}]", t, dlsn);
        }

        @Override
        public void onSuccess(Boolean isSuccess) {
            if (isSuccess) {
                truncatedDlsn = dlsn;
                logger.info("truncate log before [{}]", dlsn);
            }
        }
    });
}
 
Example #7
Source File: DistributedLogClientImpl.java    From distributedlog with Apache License 2.0
void complete(SocketAddress address, BulkWriteResponse bulkWriteResponse) {
    super.complete(address);
    Iterator<WriteResponse> writeResponseIterator = bulkWriteResponse.getWriteResponses().iterator();
    Iterator<Promise<DLSN>> resultIterator = results.iterator();

    // Fill in errors from thrift responses.
    while (resultIterator.hasNext() && writeResponseIterator.hasNext()) {
        Promise<DLSN> result = resultIterator.next();
        WriteResponse writeResponse = writeResponseIterator.next();
        if (StatusCode.SUCCESS == writeResponse.getHeader().getCode()) {
            result.setValue(DLSN.deserialize(writeResponse.getDlsn()));
        } else {
            result.setException(ProtocolUtils.exception(writeResponse.getHeader()));
        }
    }

    // Should never happen, but just in case so there's some record.
    if (bulkWriteResponse.getWriteResponses().size() != data.size()) {
        logger.error("wrong number of results, response = {} records = {}",
            bulkWriteResponse.getWriteResponses().size(), data.size());
    }
}
 
Example #8
Source File: DLFileSystem.java    From distributedlog with Apache License 2.0
@Override
public FSDataInputStream open(Path path, int bufferSize)
        throws IOException {
    try {
        DistributedLogManager dlm = namespace.openLog(getStreamName(path));
        LogReader reader;
        try {
            reader = dlm.openLogReader(DLSN.InitialDLSN);
        } catch (LogNotFoundException lnfe) {
            throw new FileNotFoundException(path.toString());
        } catch (LogEmptyException lee) {
            throw new FileNotFoundException(path.toString());
        }
        return new FSDataInputStream(
            new BufferedFSInputStream(
                new DLInputStream(dlm, reader, 0L),
                bufferSize));
    } catch (LogNotFoundException e) {
        throw new FileNotFoundException(path.toString());
    }
}
 
Example #9
Source File: HeartbeatOp.java    From distributedlog with Apache License 2.0
@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    // write a control record if heartbeat is the first request of the recovered log segment.
    if (writeControlRecord) {
        long txnId;
        Future<DLSN> writeResult;
        synchronized (txnLock) {
            txnId = sequencer.nextId();
            LogRecord hbRecord = new LogRecord(txnId, HEARTBEAT_DATA);
            hbRecord.setControl();
            writeResult = newTFuture(writer.write(hbRecord));
        }
        return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
            @Override
            public WriteResponse apply(DLSN value) {
                return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
            }
        });
    } else {
        return Future.value(ResponseUtils.writeSuccess());
    }
}
 
Example #10
Source File: ReaderWorker.java    From distributedlog with Apache License 2.0
@Override
public void run() {
    final DLSN dlsnToTruncate = prevDLSN;
    if (null == dlsnToTruncate) {
        return;
    }
    final Stopwatch stopwatch = Stopwatch.createStarted();
    dlc.truncate(streamName, dlsnToTruncate).addEventListener(
            new FutureEventListener<Boolean>() {
                @Override
                public void onSuccess(Boolean value) {
                    truncationStat.registerSuccessfulEvent(
                      stopwatch.stop().elapsed(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
                }

                @Override
                public void onFailure(Throwable cause) {
                    truncationStat.registerFailedEvent(
                      stopwatch.stop().elapsed(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
                    LOG.error("Failed to truncate stream {} to {} : ",
                            new Object[]{streamName, dlsnToTruncate, cause});
                }
            });
}
 
Example #11
Source File: DistributedTranslog.java    From Elasticsearch with Apache License 2.0
/**
 * Write a translog operation to the distributed log.
 *
 * @param operation the translog operation to serialize and write
 * @param txid transaction id counter, incremented to assign this record's transaction id
 * @return a tuple of the write future and a tuple of the serialized operation bytes and the assigned transaction id
 * @throws IOException if serializing or writing the operation fails
 */
public Tuple<Future<DLSN>, Tuple<BytesReference, Long>> writeOperation(Translog.Operation operation, AtomicLong txid) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    try (ReleasableLock lock = writeLock.acquire()) {
        Future<DLSN> writeResult = null;
        out.writeByte(operation.opType().id());
        operation.writeTo(out);
        BytesReference bytes = out.bytes();
        LogRecord logRecord = new LogRecord(txid.incrementAndGet(), bytes.toBytes());
        writeResult = logWriter.write(logRecord);
        sizeInBytes += (20 + logRecord.getPayload().length);
        ++ numOperations;
        return new Tuple<Future<DLSN>, Tuple<BytesReference, Long>>(writeResult, new Tuple<BytesReference, Long>(bytes, txid.get()));
    } catch (TransactionIdOutOfOrderException e) {
        throw e;
    } finally {
        out.close();
    }
}
 
Example #12
Source File: DistributedLogServiceImpl.java    From distributedlog with Apache License 2.0
@Override
public Future<WriteResponse> truncate(String stream, String dlsn, WriteContext ctx) {
    TruncateOp op = new TruncateOp(
        stream,
        DLSN.deserialize(dlsn),
        statsLogger,
        perStreamStatsLogger,
        getChecksum(ctx),
        featureChecksumDisabled,
        accessControlManager);
    executeStreamOp(op);
    return op.result();
}
 
Example #13
Source File: TestDLCK.java    From distributedlog with Apache License 2.0
static void verifyLogSegment(Map<Long, LogSegmentMetadata> segments,
                             DLSN lastDLSN, long logSegmentSequenceNumber,
                             int recordCount, long lastTxId) {
    LogSegmentMetadata segment = segments.get(logSegmentSequenceNumber);
    assertNotNull(segment);
    assertEquals(lastDLSN, segment.getLastDLSN());
    assertEquals(recordCount, segment.getRecordCount());
    assertEquals(lastTxId, segment.getLastTxId());
}
 
Example #14
Source File: TestDistributedLogService.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testTruncateOpChecksumBadChecksum() throws Exception {
    DistributedLogServiceImpl localService = createConfiguredLocalService();
    WriteContext ctx = new WriteContext().setCrc32(999);
    Future<WriteResponse> result = localService.truncate("test", new DLSN(1, 2, 3).serialize(), ctx);
    WriteResponse resp = Await.result(result);
    assertEquals(StatusCode.CHECKSUM_FAILED, resp.getHeader().getCode());
    localService.shutdown();
}
 
Example #15
Source File: TestLogRecordSelectors.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testFirstDLSNNotLessThanSelector() {
    DLSN dlsn = new DLSN(5L, 5L, 0L);

    FirstDLSNNotLessThanSelector largerSelector =
            new FirstDLSNNotLessThanSelector(dlsn);
    for (int i = 0; i < 10; i++) {
        largerSelector.process(DLMTestUtil.getLogRecordWithDLSNInstance(
                new DLSN(4L, i, 0L), i));
    }
    assertNull(largerSelector.result());

    FirstDLSNNotLessThanSelector smallerSelector =
            new FirstDLSNNotLessThanSelector(dlsn);
    for (int i = 0; i < 10; i++) {
        smallerSelector.process(DLMTestUtil.getLogRecordWithDLSNInstance(
                new DLSN(6L, i, 0L), i));
    }
    assertEquals(new DLSN(6L, 0L, 0L), smallerSelector.result().getDlsn());

    FirstDLSNNotLessThanSelector selector =
            new FirstDLSNNotLessThanSelector(dlsn);
    for (int i = 0; i < 10; i++) {
        selector.process(DLMTestUtil.getLogRecordWithDLSNInstance(
                new DLSN(5L, i, 0L), i));
    }
    assertEquals(dlsn, selector.result().getDlsn());
}
 
Example #16
Source File: DistributedLogClientImpl.java    From distributedlog with Apache License 2.0
@Override
public List<Future<DLSN>> writeBulk(String stream, List<ByteBuffer> data) {
    if (data.size() > 0) {
        final BulkWriteOp op = new BulkWriteOp(stream, data);
        sendRequest(op);
        return op.result();
    } else {
        return Collections.emptyList();
    }
}
 
Example #17
Source File: ClusterStateOpLog.java    From Elasticsearch with Apache License 2.0
private void dumpClusterStateImage(ClusterState clusterState, DLSN dlsn) throws IOException {
    // TODO change it to a async thread
    File tmpFile = statePath.resolve(TMP_CLUSTER_STATE_IMAGE_FILE_NAME).toFile();
    logger.debug("try to create to tmp image file: [{}]", tmpFile.getAbsolutePath());
    if (!tmpFile.exists()) {
        if (!tmpFile.createNewFile()) {
            throw new IOException("could not create temp cluster state file " + tmpFile.getName());
        }
    }
    ClusterStateWithDLSN clusterStateWithDLSN = new ClusterStateWithDLSN(clusterState, dlsn);
    FileOutputStream fileOutputStream = new FileOutputStream(tmpFile);
    BytesStreamOutput bStream = new BytesStreamOutput();
    clusterStateWithDLSN.writeTo(bStream);
    fileOutputStream.write(bStream.bytes().toBytes());
    fileOutputStream.flush();
    fileOutputStream.close();
    File clusterStateFile = statePath.resolve(CLUSTER_STATE_IMAGE_FILE_NAME).toFile();
    if (clusterStateFile.exists()) {
        clusterStateFile.delete();
    }
    if (!tmpFile.renameTo(clusterStateFile)) {
        throw new IOException("failed to rename cluster state file from " + tmpFile.getName() 
                + " to current " + clusterStateFile.getName());
    }
    logger.info("dump log image into file, info version [{}], DLSN [{}], sequenceno [{}]", clusterState.version(), dlsn, dlsn.getLogSegmentSequenceNo());
    dumpedDlsn = dlsn;
}
 
Example #18
Source File: ClusterStateOpLog.java    From Elasticsearch with Apache License 2.0
private void writeControlRecord(long version) throws IOException {
    try {
        LogRecord logRecord = new LogRecord(version, new byte[1]);
        logRecord.setControl();
        Future<DLSN> result = logWriter.write(logRecord);
        FutureUtils.result(result);
        return;
    } catch (TransactionIdOutOfOrderException e) {
        throw e;
    }
}
 
Example #19
Source File: TestDistributedLogServerBase.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testBulkWritePartialFailure() throws Exception {
    String name = String.format("dlserver-bulk-write-%s", "partial-failure");

    dlClient.routingService.addHost(name, dlServer.getAddress());

    final int writeCount = 100;

    List<ByteBuffer> writes = new ArrayList<ByteBuffer>(writeCount * 2 + 1);
    for (long i = 1; i <= writeCount; i++) {
        writes.add(ByteBuffer.wrap(("" + i).getBytes()));
    }
    // Too big, will cause partial failure.
    ByteBuffer buf = ByteBuffer.allocate(MAX_LOGRECORD_SIZE + 1);
    writes.add(buf);
    for (long i = 1; i <= writeCount; i++) {
        writes.add(ByteBuffer.wrap(("" + i).getBytes()));
    }

    // Count succeeded.
    List<Future<DLSN>> futures = dlClient.dlClient.writeBulk(name, writes);
    int succeeded = 0;
    for (int i = 0; i < writeCount; i++) {
        Future<DLSN> future = futures.get(i);
        try {
            Await.result(future, Duration.fromSeconds(10));
            ++succeeded;
        } catch (Exception ex) {
            failDueToWrongException(ex);
        }
    }

    validateFailedAsLogRecordTooLong(futures.get(writeCount));
    Await.result(Futures.collect(futures.subList(writeCount + 1, 2 * writeCount + 1)));
    assertEquals(writeCount, succeeded);
}
 
Example #20
Source File: TestDistributedLogService.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testNonDurableWrite() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setOutputBufferSize(Integer.MAX_VALUE)
            .setImmediateFlushEnabled(false)
            .setPeriodicFlushFrequencyMilliSeconds(0)
            .setDurableWriteEnabled(false);
    ServerConfiguration serverConfLocal = new ServerConfiguration();
    serverConfLocal.addConfiguration(serverConf);
    serverConfLocal.enableDurableWrite(false);
    serverConfLocal.setServiceTimeoutMs(Integer.MAX_VALUE)
            .setStreamProbationTimeoutMs(Integer.MAX_VALUE);
    String streamName = testName.getMethodName();
    DistributedLogServiceImpl localService =
            createService(serverConfLocal, confLocal);
    StreamManagerImpl streamManager = (StreamManagerImpl) localService.getStreamManager();

    int numWrites = 10;
    List<Future<WriteResponse>> futureList = new ArrayList<Future<WriteResponse>>();
    for (int i = 0; i < numWrites; i++) {
        futureList.add(localService.write(streamName, createRecord(i)));
    }
    assertTrue("Stream " + streamName + " should be cached",
            streamManager.getCachedStreams().containsKey(streamName));
    List<WriteResponse> resultList = Await.result(Future.collect(futureList));
    for (WriteResponse wr : resultList) {
        assertEquals(DLSN.InvalidDLSN, DLSN.deserialize(wr.getDlsn()));
    }

    localService.shutdown();
}
 
Example #21
Source File: TestDistributedLogService.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testTruncateOpNoChecksum() throws Exception {
    DistributedLogServiceImpl localService = createConfiguredLocalService();
    WriteContext ctx = new WriteContext();
    Future<WriteResponse> result = localService.truncate("test", new DLSN(1, 2, 3).serialize(), ctx);
    WriteResponse resp = Await.result(result);
    assertEquals(StatusCode.SUCCESS, resp.getHeader().getCode());
    localService.shutdown();
}
 
Example #22
Source File: DLInputStreamTest.java    From pulsar with Apache License 2.0
/**
 * Test Case: close the input stream.
 */
@Test
public void testClose() throws Exception {
    DistributedLogManager dlm = mock(DistributedLogManager.class);
    LogReader reader = mock(LogReader.class);
    when(dlm.getInputStream(any(DLSN.class))).thenReturn(reader);

    DLInputStream in = new DLInputStream(dlm);
    verify(dlm, times(1)).getInputStream(eq(DLSN.InitialDLSN));
    in.close();
    verify(dlm, times(1)).close();
    verify(reader, times(1)).close();
}
 
Example #23
Source File: DLInputStreamTest.java    From incubator-heron with Apache License 2.0
/**
 * Test Case: close the input stream.
 */
@Test
public void testClose() throws Exception {
  DistributedLogManager dlm = mock(DistributedLogManager.class);
  LogReader reader = mock(LogReader.class);
  when(dlm.getInputStream(any(DLSN.class))).thenReturn(reader);

  DLInputStream in = new DLInputStream(dlm);
  verify(dlm, times(1)).getInputStream(eq(DLSN.InitialDLSN));
  in.close();
  verify(dlm, times(1)).close();
  verify(reader, times(1)).close();
}
 
Example #24
Source File: TestDistributedLogMultiStreamWriter.java    From distributedlog with Apache License 2.0
@Test(timeout = 20000)
public void testFlushWhenBufferIsFull() throws Exception {
    DistributedLogClient client = mock(DistributedLogClient.class);
    when(client.writeRecordSet((String) any(), (LogRecordSetBuffer) any()))
            .thenReturn(Future.value(new DLSN(1L, 1L, 999L)));

    ScheduledExecutorService executorService =
            Executors.newSingleThreadScheduledExecutor();
    DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList("stream1", "stream2"))
            .client(client)
            .compressionCodec(CompressionCodec.Type.LZ4)
            .firstSpeculativeTimeoutMs(100000)
            .maxSpeculativeTimeoutMs(200000)
            .speculativeBackoffMultiplier(2)
            .requestTimeoutMs(500000)
            .flushIntervalMs(0)
            .bufferSize(0)
            .scheduler(executorService)
            .build();

    ByteBuffer buffer = ByteBuffer.wrap("test".getBytes(UTF_8));
    writer.write(buffer);

    verify(client, times(1)).writeRecordSet((String) any(), (LogRecordSetBuffer) any());

    writer.close();
}
 
Example #25
Source File: LogSegmentMetadataStoreUpdater.java    From distributedlog with Apache License 2.0
@Override
public CompletableFuture<LogSegmentMetadata> updateLastRecord(LogSegmentMetadata segment,
                                                              LogRecordWithDLSN record) {
    DLSN dlsn = record.getDlsn();
    checkState(!segment.isInProgress(),
            "Updating last dlsn for an inprogress log segment isn't supported.");
    checkArgument(segment.isDLSNinThisSegment(dlsn),
            "DLSN " + dlsn + " doesn't belong to segment " + segment);
    final LogSegmentMetadata newSegment = segment.mutator()
            .setLastDLSN(dlsn)
            .setLastTxId(record.getTransactionId())
            .setRecordCount(record)
            .build();
    return updateSegmentMetadata(newSegment);
}
 
Example #26
Source File: LogSegmentMetadataStoreUpdater.java    From distributedlog with Apache License 2.0
/**
 * Change the truncation status of a <i>log segment</i> to partially truncated.
 *
 * @param segment log segment to change sequence number.
 * @param minActiveDLSN DLSN within the log segment before which log has been truncated
 * @return new log segment
 */
@Override
public CompletableFuture<LogSegmentMetadata>
setLogSegmentPartiallyTruncated(LogSegmentMetadata segment, DLSN minActiveDLSN) {
    final LogSegmentMetadata newSegment = segment.mutator()
        .setTruncationStatus(LogSegmentMetadata.TruncationStatus.PARTIALLY_TRUNCATED)
        .setMinActiveDLSN(minActiveDLSN)
        .build();
    return addNewSegmentAndDeleteOldSegment(newSegment, segment);
}
 
Example #27
Source File: ConsoleProxyRRMultiWriter.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }

    String finagleNameStr = args[0];
    final String streamList = args[1];

    DistributedLogClient client = DistributedLogClientBuilder.newBuilder()
            .clientId(ClientId$.MODULE$.apply("console-proxy-writer"))
            .name("console-proxy-writer")
            .thriftmux(true)
            .finagleNameStr(finagleNameStr)
            .build();
    String[] streamNameList = StringUtils.split(streamList, ',');
    RRMultiWriter<Integer, String> writer = new RRMultiWriter(streamNameList, client);

    ConsoleReader reader = new ConsoleReader();
    String line;
    while ((line = reader.readLine(PROMPT_MESSAGE)) != null) {
        writer.write(line)
                .addEventListener(new FutureEventListener<DLSN>() {
                    @Override
                    public void onFailure(Throwable cause) {
                        System.out.println("Encountered error on writing data");
                        cause.printStackTrace(System.err);
                        Runtime.getRuntime().exit(0);
                    }

                    @Override
                    public void onSuccess(DLSN value) {
                        // done
                    }
                });
    }

    client.close();
}
 
Example #28
Source File: DistributedLogTool.java    From distributedlog with Apache License 2.0
@Override
protected int runCmd() throws Exception {
    DistributedLogManager dlm = getNamespace().openLog(getStreamName());
    long totalCount = dlm.getLogRecordCount();
    try {
        AsyncLogReader reader;
        Object startOffset;
        try {
            DLSN lastDLSN = FutureUtils.result(dlm.getLastDLSNAsync());
            System.out.println("Last DLSN : " + lastDLSN);
            if (null == fromDLSN) {
                reader = dlm.getAsyncLogReader(fromTxnId);
                startOffset = fromTxnId;
            } else {
                reader = dlm.getAsyncLogReader(fromDLSN);
                startOffset = fromDLSN;
            }
        } catch (LogNotFoundException lee) {
            System.out.println("No stream found to dump records.");
            return 0;
        }
        try {
            System.out.println(String.format("Dump records for %s (from = %s, dump"
                    + " count = %d, total records = %d)", getStreamName(), startOffset, count, totalCount));

            dumpRecords(reader);
        } finally {
            Utils.close(reader);
        }
    } finally {
        dlm.close();
    }
    return 0;
}
 
Example #29
Source File: DistributedLogTool.java    From distributedlog with Apache License 2.0
private int truncateStream(final Namespace namespace, String streamName, DLSN dlsn) throws Exception {
    DistributedLogManager dlm = namespace.openLog(streamName);
    try {
        long totalRecords = dlm.getLogRecordCount();
        long recordsAfterTruncate = FutureUtils.result(dlm.getLogRecordCountAsync(dlsn));
        long recordsToTruncate = totalRecords - recordsAfterTruncate;
        if (!getForce() && !IOUtils.confirmPrompt("Do you want to truncate "
                + streamName + " at dlsn " + dlsn + " (" + recordsToTruncate + " records)?")) {
            return 0;
        } else {
            AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
            try {
                if (!FutureUtils.result(writer.truncate(dlsn))) {
                    System.out.println("Failed to truncate.");
                }
                return 0;
            } finally {
                Utils.close(writer);
            }
        }
    } catch (Exception ex) {
        System.err.println("Failed to truncate " + ex);
        return 1;
    } finally {
        dlm.close();
    }
}
 
Example #30
Source File: ZKSubscriptionStateStore.java    From distributedlog with Apache License 2.0
/**
 * Get the last committed position stored for this subscription.
 */
@Override
public CompletableFuture<DLSN> getLastCommitPosition() {
    if (null != lastCommittedPosition.get()) {
        return FutureUtils.value(lastCommittedPosition.get());
    } else {
        return getLastCommitPositionFromZK();
    }
}