Java Code Examples for com.google.common.collect.Queues#newConcurrentLinkedQueue()

The following examples show how to use com.google.common.collect.Queues#newConcurrentLinkedQueue(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
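Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name QueuesExample is hypothetical) of what the factory method does: it returns a new, empty java.util.concurrent.ConcurrentLinkedQueue that can be shared between threads without external locking, and Guava also offers an overload that pre-populates the queue from an Iterable.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import com.google.common.collect.Queues;

public class QueuesExample {
    public static void main(String[] args) throws InterruptedException {
        // Equivalent to new ConcurrentLinkedQueue<String>(); the factory method just adds type inference.
        Queue<String> queue = Queues.newConcurrentLinkedQueue();

        // Producer thread: add() never blocks because the queue is unbounded.
        Thread producer = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                queue.add("task-" + i);
            }
        });
        producer.start();
        producer.join();

        // Consumer: poll() returns null when the queue is empty instead of blocking.
        String item;
        while ((item = queue.poll()) != null) {
            System.out.println(item);
        }
    }
}

The same null-on-empty polling pattern appears in several of the examples below, for instance the incomingMessages.poll() loops in the Pulsar consumers.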
Example 1
Source File: ParallelRunnerTest.java    From incubator-gobblin with Apache License 2.0
@Test(dependsOnMethods = "testSerializeToSequenceFile")
public void testDeserializeFromSequenceFile() throws IOException {
  Queue<WorkUnitState> workUnitStates = Queues.newConcurrentLinkedQueue();

  Path seqPath1 = new Path(this.outputPath, "seq1");
  Path seqPath2 = new Path(this.outputPath, "seq2");

  try (ParallelRunner parallelRunner = new ParallelRunner(2, this.fs)) {
    parallelRunner.deserializeFromSequenceFile(Text.class, WorkUnitState.class, seqPath1, workUnitStates, true);
    parallelRunner.deserializeFromSequenceFile(Text.class, WorkUnitState.class, seqPath2, workUnitStates, true);
  }

  Assert.assertFalse(this.fs.exists(seqPath1));
  Assert.assertFalse(this.fs.exists(seqPath2));

  Assert.assertEquals(workUnitStates.size(), 2);

  for (WorkUnitState workUnitState : workUnitStates) {
    TestWatermark watermark = new Gson().fromJson(workUnitState.getActualHighWatermark(), TestWatermark.class);
    Assert.assertTrue(watermark.getLongWatermark() == 10L || watermark.getLongWatermark() == 100L);
  }
}
 
Example 2
Source File: ConsumerImpl.java    From pulsar with Apache License 2.0
@Override
protected CompletableFuture<Messages<T>> internalBatchReceiveAsync() {
    CompletableFuture<Messages<T>> result = new CompletableFuture<>();
    try {
        lock.writeLock().lock();
        if (pendingBatchReceives == null) {
            pendingBatchReceives = Queues.newConcurrentLinkedQueue();
        }
        if (hasEnoughMessagesForBatchReceive()) {
            MessagesImpl<T> messages = getNewMessagesImpl();
            Message<T> msgPeeked = incomingMessages.peek();
            while (msgPeeked != null && messages.canAdd(msgPeeked)) {
                Message<T> msg = incomingMessages.poll();
                if (msg != null) {
                    messageProcessed(msg);
                    Message<T> interceptMsg = beforeConsume(msg);
                    messages.add(interceptMsg);
                }
                msgPeeked = incomingMessages.peek();
            }
            result.complete(messages);
        } else {
            pendingBatchReceives.add(OpBatchReceive.of(result));
        }
    } finally {
        lock.writeLock().unlock();
    }
    return result;
}
 
Example 3
Source File: DelayedActionTickHandler.java    From OpenModsLib with MIT License
private Queue<Runnable> getWorldQueue(int worldId) {
	synchronized (callbacks) {
		Queue<Runnable> result = callbacks.get(worldId);

		if (result == null) {
			result = Queues.newConcurrentLinkedQueue();
			callbacks.put(worldId, result);
		}

		return result;
	}
}
 
Example 4
Source File: TestEdges.java    From workflow with Apache License 2.0
@Test
public void testIdempotency() throws Exception
{
    TaskType idempotentType = new TaskType("yes", "1", true);
    TaskType nonIdempotentType = new TaskType("no", "1", false);

    Task idempotentTask = new Task(new TaskId(), idempotentType);
    Task nonIdempotentTask = new Task(new TaskId(), nonIdempotentType);
    Task root = new Task(new TaskId(), Lists.newArrayList(idempotentTask, nonIdempotentTask));

    Set<TaskId> thrownTasks = Sets.newConcurrentHashSet();
    Queue<TaskId> tasks = Queues.newConcurrentLinkedQueue();
    TaskExecutor taskExecutor = (m, t) -> () -> {
        if ( thrownTasks.add(t.getTaskId()) )
        {
            throw new RuntimeException();
        }
        tasks.add(t.getTaskId());
        return new TaskExecutionResult(TaskExecutionStatus.SUCCESS, "");
    };
    WorkflowManager workflowManager = WorkflowManagerBuilder.builder()
        .addingTaskExecutor(taskExecutor, 10, idempotentType)
        .addingTaskExecutor(taskExecutor, 10, nonIdempotentType)
        .withCurator(curator, "test", "1")
        .build();
    try
    {
        workflowManager.start();
        workflowManager.submitTask(root);

        timing.sleepABit();

        Assert.assertEquals(tasks.size(), 1);
        Assert.assertEquals(tasks.poll(), idempotentTask.getTaskId());
    }
    finally
    {
        CloseableUtils.closeQuietly(workflowManager);
    }
}
 
Example 5
Source File: HadoopUtils.java    From incubator-gobblin with Apache License 2.0
/**
 * This method is an additive implementation of the {@link FileSystem#rename(Path, Path)} method. It moves all the
 * files/directories under 'from' path to the 'to' path without overwriting existing directories in the 'to' path.
 *
 * <p>
 * The rename operation happens at the first non-existent sub-directory. If a directory already exists at the
 * destination path, this method recursively tries to move its sub-directories. If all the sub-directories also
 * exist at the destination, a file-level move is done.
 * </p>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to destination path to which the data is moved
 */
public static void renameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {

  log.info(String.format("Recursively renaming %s in %s to %s.", from, fileSystem.getUri(), to));

  FileSystem throttledFS = getOptionallyThrottledFileSystem(fileSystem, 10000);

  ExecutorService executorService = ScalingThreadPoolExecutor.newScalingThreadPool(1, 100, 100,
      ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("rename-thread-%d")));
  Queue<Future<?>> futures = Queues.newConcurrentLinkedQueue();

  try {
    if (!fileSystem.exists(from)) {
      throw new IOException("Trying to rename a path that does not exist! " + from);
    }

    futures.add(executorService
        .submit(new RenameRecursively(throttledFS, fileSystem.getFileStatus(from), to, executorService, futures)));
    int futuresUsed = 0;
    while (!futures.isEmpty()) {
      try {
        futures.poll().get();
        futuresUsed++;
      } catch (ExecutionException | InterruptedException ee) {
        throw new IOException(ee.getCause());
      }
    }

    log.info(String.format("Recursive renaming of %s to %s. (details: used %d futures)", from, to, futuresUsed));

  } finally {
    ExecutorsUtils.shutdownExecutorService(executorService, Optional.of(log), 1, TimeUnit.SECONDS);
  }
}
 
Example 6
Source File: ManagedLedgerImpl.java    From pulsar with Apache License 2.0
public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store,
        ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, OrderedExecutor orderedExecutor,
        final String name, final Supplier<Boolean> mlOwnershipChecker) {
    this.factory = factory;
    this.bookKeeper = bookKeeper;
    this.config = config;
    this.store = store;
    this.name = name;
    this.ledgerMetadata = LedgerMetadataUtils.buildBaseManagedLedgerMetadata(name);
    this.digestType = BookKeeper.DigestType.fromApiDigestType(config.getDigestType());
    this.scheduledExecutor = scheduledExecutor;
    this.executor = orderedExecutor;
    TOTAL_SIZE_UPDATER.set(this, 0);
    NUMBER_OF_ENTRIES_UPDATER.set(this, 0);
    ENTRIES_ADDED_COUNTER_UPDATER.set(this, 0);
    STATE_UPDATER.set(this, State.None);
    this.ledgersStat = null;
    this.mbean = new ManagedLedgerMBeanImpl(this);
    this.entryCache = factory.getEntryCacheManager().getEntryCache(this);
    this.waitingCursors = Queues.newConcurrentLinkedQueue();
    this.uninitializedCursors = Maps.newHashMap();
    this.clock = config.getClock();

    // Get the next rollover time. Add a random value of up to 5% to avoid rolling over multiple ledgers at the same time
    this.maximumRolloverTimeMs = (long) (config.getMaximumRolloverTimeMs() * (1 + random.nextDouble() * 5 / 100.0));
    this.mlOwnershipChecker = mlOwnershipChecker;
    this.propertiesMap = Maps.newHashMap();
}
 
Example 7
Source File: ClientCnx.java    From pulsar with Apache License 2.0
public ClientCnx(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, int protocolVersion) {
    super(conf.getKeepAliveIntervalSeconds(), TimeUnit.SECONDS);
    checkArgument(conf.getMaxLookupRequest() > conf.getConcurrentLookupRequest());
    this.pendingLookupRequestSemaphore = new Semaphore(conf.getConcurrentLookupRequest(), false);
    this.maxLookupRequestSemaphore = new Semaphore(conf.getMaxLookupRequest() - conf.getConcurrentLookupRequest(), false);
    this.waitingLookupRequests = Queues.newConcurrentLinkedQueue();
    this.authentication = conf.getAuthentication();
    this.eventLoopGroup = eventLoopGroup;
    this.maxNumberOfRejectedRequestPerConnection = conf.getMaxNumberOfRejectedRequestPerConnection();
    this.operationTimeoutMs = conf.getOperationTimeoutMs();
    this.state = State.None;
    this.isTlsHostnameVerificationEnable = conf.isTlsHostnameVerificationEnable();
    this.protocolVersion = protocolVersion;
}
 
Example 8
Source File: ConsumerBase.java    From pulsar with Apache License 2.0
private void pendingBatchReceiveTask(Timeout timeout) throws Exception {
    if (timeout.isCancelled()) {
        return;
    }

    long timeToWaitMs;

    synchronized (this) {
        // If it's closing/closed we need to ignore this timeout and not schedule next timeout.
        if (getState() == State.Closing || getState() == State.Closed) {
            return;
        }
        if (pendingBatchReceives == null) {
            pendingBatchReceives = Queues.newConcurrentLinkedQueue();
        }
        OpBatchReceive<T> firstOpBatchReceive = pendingBatchReceives.peek();
        timeToWaitMs = batchReceivePolicy.getTimeoutMs();

        while (firstOpBatchReceive != null) {
            // If there is at least one batch receive, calculate the diff between the batch receive timeout
            // and the elapsed time since the operation was created.
            long diff = batchReceivePolicy.getTimeoutMs()
                    - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - firstOpBatchReceive.createdAt);
            if (diff <= 0) {
                // The diff is less than or equal to zero, meaning that the batch receive has been timed out.
                // complete the OpBatchReceive and continue to check the next OpBatchReceive in pendingBatchReceives.
                OpBatchReceive<T> op = pendingBatchReceives.poll();
                completeOpBatchReceive(op);
                firstOpBatchReceive = pendingBatchReceives.peek();
            } else {
                // The diff is greater than zero, set the timeout to the diff value
                timeToWaitMs = diff;
                break;
            }
        }
        batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, timeToWaitMs, TimeUnit.MILLISECONDS);
    }
}
 
Example 9
Source File: KafkaCommandDecoder.java    From kop with Apache License 2.0
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    super.channelActive(ctx);
    this.remoteAddress = ctx.channel().remoteAddress();
    this.ctx = ctx;
    isActive.set(true);
    requestsQueue = Queues.newConcurrentLinkedQueue();
}
 
Example 10
Source File: MultiTopicsConsumerImpl.java    From pulsar with Apache License 2.0
@Override
protected CompletableFuture<Messages<T>> internalBatchReceiveAsync() {
    CompletableFuture<Messages<T>> result = new CompletableFuture<>();
    try {
        lock.writeLock().lock();
        if (pendingBatchReceives == null) {
            pendingBatchReceives = Queues.newConcurrentLinkedQueue();
        }
        if (hasEnoughMessagesForBatchReceive()) {
            MessagesImpl<T> messages = getNewMessagesImpl();
            Message<T> msgPeeked = incomingMessages.peek();
            while (msgPeeked != null && messages.canAdd(msgPeeked)) {
                Message<T> msg = incomingMessages.poll();
                if (msg != null) {
                    INCOMING_MESSAGES_SIZE_UPDATER.addAndGet(this, -msg.getData().length);
                    Message<T> interceptMsg = beforeConsume(msg);
                    messages.add(interceptMsg);
                }
                msgPeeked = incomingMessages.peek();
            }
            result.complete(messages);
        } else {
            pendingBatchReceives.add(OpBatchReceive.of(result));
        }
        resumeReceivingFromPausedConsumersIfNeeded();
    } finally {
        lock.writeLock().unlock();
    }
    return result;
}
 
Example 11
Source File: Futures.java    From codebuff with BSD 2-Clause "Simplified" License
/**
 * Returns a list of delegate futures that correspond to the futures received in the order that
 * they complete. Delegate futures return the same value or throw the same exception as the
 * corresponding input future returns/throws.
 *
 * <p>Cancelling a delegate future has no effect on any input future, since the delegate future
 * does not correspond to a specific input future until the appropriate number of input futures
 * have completed. At that point, it is too late to cancel the input future. The input future's
 * result, which cannot be stored into the cancelled delegate future, is ignored.
 *
 * @since 17.0
 */
@Beta
@GwtIncompatible // TODO
public static <T> ImmutableList<ListenableFuture<T>> inCompletionOrder(
    Iterable<? extends ListenableFuture<? extends T>> futures) {
  // A CLQ may be overkill here. We could save some pointers/memory by synchronizing on an
  // ArrayDeque
  final ConcurrentLinkedQueue<SettableFuture<T>> delegates = Queues.newConcurrentLinkedQueue();
  ImmutableList.Builder<ListenableFuture<T>> listBuilder = ImmutableList.builder();
  // Using SerializingExecutor here will ensure that each CompletionOrderListener executes
  // atomically and therefore that each returned future is guaranteed to be in completion order.
  // N.B. there are some cases where the use of this executor could have possibly surprising
  // effects when input futures finish at approximately the same time _and_ the output futures
  // have directExecutor listeners. In this situation, the listeners may end up running on a
  // different thread than if they were attached to the corresponding input future. We believe
  // this to be a negligible cost since:
  // 1. Using the directExecutor implies that your callback is safe to run on any thread.
  // 2. This would likely only be noticeable if you were doing something expensive or blocking on
  //    a directExecutor listener on one of the output futures which is an antipattern anyway.
  SerializingExecutor executor = new SerializingExecutor(directExecutor());
  for (final ListenableFuture<? extends T> future : futures) {
    SettableFuture<T> delegate = SettableFuture.create();
    // Must make sure to add the delegate to the queue first in case the future is already done
    delegates.add(delegate);
    future.addListener(
        new Runnable() {
          @Override
          public void run() {
            delegates.remove().setFuture(future);
          }
        },
        executor);
    listBuilder.add(delegate);
  }
  return listBuilder.build();
}
 
Example 12
Source File: ConsumerBase.java    From pulsar with Apache License 2.0
protected ConsumerBase(PulsarClientImpl client, String topic, ConsumerConfigurationData<T> conf,
                       int receiverQueueSize, ExecutorService listenerExecutor,
                       CompletableFuture<Consumer<T>> subscribeFuture, Schema<T> schema, ConsumerInterceptors interceptors) {
    super(client, topic);
    this.maxReceiverQueueSize = receiverQueueSize;
    this.subscription = conf.getSubscriptionName();
    this.conf = conf;
    this.consumerName = conf.getConsumerName() == null ? ConsumerName.generateRandomName() : conf.getConsumerName();
    this.subscribeFuture = subscribeFuture;
    this.listener = conf.getMessageListener();
    this.consumerEventListener = conf.getConsumerEventListener();
    // Always use growable queue since items can exceed the advertised size
    this.incomingMessages = new GrowableArrayBlockingQueue<>();
    this.unAckedChunckedMessageIdSequenceMap = new ConcurrentOpenHashMap<>();

    this.listenerExecutor = listenerExecutor;
    this.pendingReceives = Queues.newConcurrentLinkedQueue();
    this.schema = schema;
    this.interceptors = interceptors;
    if (conf.getBatchReceivePolicy() != null) {
        BatchReceivePolicy userBatchReceivePolicy = conf.getBatchReceivePolicy();
        if (userBatchReceivePolicy.getMaxNumMessages() > this.maxReceiverQueueSize) {
            this.batchReceivePolicy = BatchReceivePolicy.builder()
                    .maxNumMessages(this.maxReceiverQueueSize)
                    .maxNumBytes(userBatchReceivePolicy.getMaxNumBytes())
                    .timeout((int) userBatchReceivePolicy.getTimeoutMs(), TimeUnit.MILLISECONDS)
                    .build();
            log.warn("BatchReceivePolicy maxNumMessages: {} is greater than maxReceiverQueueSize: {}, " +
                    "reset to maxReceiverQueueSize. batchReceivePolicy: {}",
                    userBatchReceivePolicy.getMaxNumMessages(), this.maxReceiverQueueSize,
                    this.batchReceivePolicy.toString());
        } else if (userBatchReceivePolicy.getMaxNumMessages() <= 0 && userBatchReceivePolicy.getMaxNumBytes() <= 0) {
            this.batchReceivePolicy = BatchReceivePolicy.builder()
                    .maxNumMessages(BatchReceivePolicy.DEFAULT_POLICY.getMaxNumMessages())
                    .maxNumBytes(BatchReceivePolicy.DEFAULT_POLICY.getMaxNumBytes())
                    .timeout((int) userBatchReceivePolicy.getTimeoutMs(), TimeUnit.MILLISECONDS)
                    .build();
            log.warn("BatchReceivePolicy maxNumMessages: {} or maxNumBytes: {} is less than 0. " +
                    "Reset to DEFAULT_POLICY. batchReceivePolicy: {}", userBatchReceivePolicy.getMaxNumMessages(),
                    userBatchReceivePolicy.getMaxNumBytes(), this.batchReceivePolicy.toString());
        } else {
            this.batchReceivePolicy = conf.getBatchReceivePolicy();
        }
    } else {
        this.batchReceivePolicy = BatchReceivePolicy.DEFAULT_POLICY;
    }

    if (batchReceivePolicy.getTimeoutMs() > 0) {
        batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, batchReceivePolicy.getTimeoutMs(), TimeUnit.MILLISECONDS);
    }
}
 
Example 13
Source File: PulsarByteBufferMessageSet.java    From pulsar with Apache License 2.0
public MessageAndOffsetIterator(Reader<byte[]> reader) {
    this.reader = reader;
    this.receivedMessages = Queues.newConcurrentLinkedQueue();
}
 
Example 14
Source File: SegmentQueryResult.java    From kylin-on-parquet-v2 with Apache License 2.0
public Builder(int regionsNum, int maxCacheResultSize) {
    this.regionsNum = regionsNum;
    this.queue = Queues.newConcurrentLinkedQueue();
    this.totalResultSize = new AtomicInteger();
    this.maxSegmentCacheSize = maxCacheResultSize;
}