java.util.concurrent.LinkedBlockingQueue Java Examples

The following examples show how to use java.util.concurrent.LinkedBlockingQueue. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
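Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core LinkedBlockingQueue contract: put() blocks while a bounded queue is full, and take() blocks while it is empty.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class ProducerConsumerSketch {
    public static void main(String[] args) throws InterruptedException {
        // Bounded queue: put() blocks when 100 elements are queued, take() blocks when empty.
        BlockingQueue<String> queue = new LinkedBlockingQueue<>(100);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 10; i++) {
                    queue.put("item-" + i); // blocks if the queue is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        producer.start();
        for (int i = 0; i < 10; i++) {
            System.out.println(queue.take()); // blocks until an element is available
        }
        producer.join();
    }
}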
Example #1
Source File: ExternalManagementRequestExecutor.java    From wildfly-core with GNU Lesser General Public License v2.1
@Override
public synchronized void start(StartContext context) throws StartException {
    final String namePattern = "External Management Request Threads -- %t";
    final ThreadFactory threadFactory = doPrivileged(new PrivilegedAction<ThreadFactory>() {
        public ThreadFactory run() {
            return new JBossThreadFactory(threadGroup, Boolean.FALSE, null, namePattern, null, null);
        }
    });

    int poolSize = getPoolSize();
    if (EnhancedQueueExecutor.DISABLE_HINT) {
        final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(WORK_QUEUE_SIZE);
        executorService = new ThreadPoolExecutor(poolSize, poolSize, 60L, TimeUnit.SECONDS,
                workQueue, threadFactory);
    } else {
        executorService = new EnhancedQueueExecutor.Builder()
            .setCorePoolSize(poolSize)
            .setMaximumPoolSize(poolSize)
            .setKeepAliveTime(60L, TimeUnit.SECONDS)
            .setMaximumQueueSize(WORK_QUEUE_SIZE)
            .setThreadFactory(threadFactory)
            .build();
    }
}
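The DISABLE_HINT branch above pairs a bounded LinkedBlockingQueue with a ThreadPoolExecutor. With the default rejection handler (AbortPolicy), submissions throw RejectedExecutionException once all threads are busy and the queue is full. A minimal sketch of that behavior; the pool and queue sizes are illustrative and not taken from wildfly-core:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueRejectionSketch {
    public static void main(String[] args) {
        // One thread, work queue capacity 1, default AbortPolicy.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(1));
        try {
            pool.execute(() -> sleep(500)); // occupies the single thread
            pool.execute(() -> { });        // sits in the queue
            pool.execute(() -> { });        // queue full -> RejectedExecutionException
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: " + e);
        } finally {
            pool.shutdown();
        }
    }

    private static void sleep(long millis) {
        try { Thread.sleep(millis); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
    }
}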
 
Example #2
Source File: ThreadPoolBuilder.java    From vjtools with Apache License 2.0
public ThreadPoolExecutor build() {
	BlockingQueue<Runnable> queue = null;
	if (queueSize < 1) {
		queue = new LinkedBlockingQueue<Runnable>();
	} else {
		queue = new ArrayBlockingQueue<Runnable>(queueSize);
	}

	threadFactory = createThreadFactory(threadFactory, threadNamePrefix, daemon);

	if (rejectHandler == null) {
		rejectHandler = defaultRejectHandler;
	}

	return new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS, queue, threadFactory,
			rejectHandler);
}
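A note on the unbounded branch above: ThreadPoolExecutor only grows past corePoolSize when the work queue refuses an offer, and an unbounded LinkedBlockingQueue never refuses one, so a larger maximum pool size would simply never be reached. Using the same value for core and maximum size, as this builder does, avoids that trap. A small illustrative sketch (not part of vjtools):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class UnboundedQueueSizingSketch {
    public static void main(String[] args) throws InterruptedException {
        // core=1, max=4, but the unbounded queue absorbs every task, so only one thread is ever created.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 4, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>());
        for (int i = 0; i < 20; i++) {
            pool.execute(() -> {
                try { Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
            });
        }
        Thread.sleep(300);
        System.out.println("largest pool size: " + pool.getLargestPoolSize()); // prints 1
        pool.shutdown();
    }
}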
 
Example #3
Source File: AbstractGPlusProvider.java    From streams with Apache License 2.0
@Override
public StreamsResultSet readCurrent() {
  BlockingQueue<StreamsDatum> batch = new LinkedBlockingQueue<>();
  int batchCount = 0;
  while (!this.datumQueue.isEmpty() && batchCount < MAX_BATCH_SIZE) {
    StreamsDatum datum = ComponentUtils.pollWhileNotEmpty(this.datumQueue);
    if (datum != null) {
      ++batchCount;
      ComponentUtils.offerUntilSuccess(datum, batch);
    }
  }
  boolean pullIsEmpty = batch.isEmpty() && this.datumQueue.isEmpty() && this.executor.isTerminated();
  this.isComplete.set(this.previousPullWasEmpty && pullIsEmpty);
  this.previousPullWasEmpty = pullIsEmpty;
  return new StreamsResultSet(batch);
}
 
Example #4
Source File: ServerCommunicationSystem.java    From protect with MIT License
/**
 * Creates a new instance of ServerCommunicationSystem
 */
public ServerCommunicationSystem(ServerViewController controller, ServiceReplica replica) throws Exception {
	super("Server CS");

	this.controller = controller;

	inQueue = new LinkedBlockingQueue<SystemMessage>(controller.getStaticConf().getInQueueSize());

	// create a new conf, with updated port number for servers
	// TOMConfiguration serversConf = new TOMConfiguration(conf.getProcessId(),
	// Configuration.getHomeDir(), "hosts.config");

	// serversConf.increasePortNumber();

	serversConn = new ServersCommunicationLayer(controller, inQueue, replica);

	messageHandler = new MessageHandler(controller.getStaticConf().getHmacAlgorithm());

	// ******* EDUARDO BEGIN **************//
	// if (manager.isInCurrentView() || manager.isInInitView()) {
	clientsConn = CommunicationSystemServerSideFactory.getCommunicationSystemServerSide(controller);
	// }
	// ******* EDUARDO END **************//
	// start();
}
 
Example #5
Source File: BqQueueWorkerTest.java    From beast with Apache License 2.0
@Test
public void shouldReadFromQueueForeverAndPushToSink() throws InterruptedException {
    BlockingQueue<Records> queue = new LinkedBlockingQueue<>();
    BqQueueWorker worker = new BqQueueWorker("bq-worker", successfulSink, queueConfig, committer, queue, workerState);
    Records messages2 = mock(Records.class);
    when(committer.acknowledge(any())).thenReturn(true);
    queue.put(messages);
    queue.put(messages2);

    Thread workerThread = new Thread(worker);
    workerThread.start();

    await().atMost(10, TimeUnit.SECONDS).until(() -> queue.isEmpty());
    workerState.closeWorker();
    workerThread.join();
    verify(successfulSink).push(messages);
    verify(successfulSink).push(messages2);
}
 
Example #6
Source File: TestBeatsSocketChannelHandler.java    From nifi with Apache License 2.0
@Before
public void setup() {
    eventFactory = new TestEventHolderFactory();
    channelHandlerFactory = new BeatsSocketChannelHandlerFactory<>();

    byteBuffers = new LinkedBlockingQueue<>();
    byteBuffers.add(ByteBuffer.allocate(4096));

    events = new LinkedBlockingQueue<>();
    logger = Mockito.mock(ComponentLog.class);

    maxConnections = 1;
    sslContext = null;
    charset = StandardCharsets.UTF_8;

    dispatcher = new SocketChannelDispatcher<>(eventFactory, channelHandlerFactory, byteBuffers, events, logger,
            maxConnections, sslContext, charset);

}
 
Example #7
Source File: String2HashJoinOperatorTest.java    From flink with Apache License 2.0
public static LinkedBlockingQueue<Object> transformToBinary(LinkedBlockingQueue<Object> output) {
	LinkedBlockingQueue<Object> ret = new LinkedBlockingQueue<>();
	for (Object o : output) {
		BaseRow row = ((StreamRecord<BaseRow>) o).getValue();
		BinaryRow binaryRow;
		if (row.isNullAt(0)) {
			binaryRow = newRow(row.getString(2).toString(), row.getString(3) + "null");
		} else if (row.isNullAt(2)) {
			binaryRow = newRow(row.getString(0).toString(), row.getString(1) + "null");
		} else {
			String value1 = row.getString(1).toString();
			String value2 = row.getString(3).toString();
			binaryRow = newRow(row.getString(0).toString(), value1 + value2);
		}
		ret.add(new StreamRecord(binaryRow));
	}
	return ret;
}
 
Example #8
Source File: SubscriptionTest.java    From actioncable-client-java with MIT License
@Test
public void onConnectedByCustomInterface() throws URISyntaxException, InterruptedException {
    final BlockingQueue<String> events = new LinkedBlockingQueue<String>();

    final Consumer consumer = new Consumer(new URI("ws://example.com:28080"));
    final Channel channel = new Channel("CommentsChannel");
    final Subscription subscription = consumer.getSubscriptions().create(channel, CustomSubscription.class);

    final Subscription returned = subscription.onConnected(new Subscription.ConnectedCallback() {
        @Override
        public void call() {
            events.offer("onConnected");
        }
    });
    assertThat(returned, is(theInstance(subscription)));

    consumer.getSubscriptions().notifyConnected(subscription.getIdentifier());

    assertThat(events.take(), is("onConnected"));
}
 
Example #9
Source File: HttpSyncDataService.java    From soul with Apache License 2.0
private void start(final HttpConfig httpConfig) {
    // init RestTemplate
    OkHttp3ClientHttpRequestFactory factory = new OkHttp3ClientHttpRequestFactory();
    factory.setConnectTimeout((int) this.connectionTimeout.toMillis());
    factory.setReadTimeout((int) HttpConstants.CLIENT_POLLING_READ_TIMEOUT);
    this.httpClient = new RestTemplate(factory);
    // This method may be invoked multiple times, so guard against repeated initialization.
    if (RUNNING.compareAndSet(false, true)) {
        // fetch all group configs.
        this.fetchGroupConfig(ConfigGroupEnum.values());
        // one thread for listener, another one for fetch configuration data.
        this.executor = new ThreadPoolExecutor(3, 3, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(),
                SoulThreadFactory.create("http-long-polling", true));
        // start long polling.
        this.executor.execute(new HttpLongPollingTask());
    } else {
        log.info("soul http long polling was started, executor=[{}]", executor);
    }
}
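The RUNNING.compareAndSet(false, true) guard above is a common idempotent-start pattern: only the first caller wins the compare-and-set and builds the executor; later callers fall through to the logging branch. A stripped-down sketch of the same pattern; the class and field names here are illustrative, not from soul:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class IdempotentStartSketch {
    private final AtomicBoolean running = new AtomicBoolean(false);
    private volatile ExecutorService executor;

    public void start() {
        if (running.compareAndSet(false, true)) {
            // Only the first call reaches this branch and creates the worker pool.
            executor = Executors.newSingleThreadExecutor();
            executor.execute(() -> System.out.println("long polling task started"));
        } else {
            System.out.println("already started, executor=" + executor);
        }
    }
}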
 
Example #10
Source File: AuthHttpServer.java    From swift-explorer with Apache License 2.0
@Override
public Map<String, String> startAndWaitForData () throws IOException, InterruptedException
{
	InetSocketAddress addr = new InetSocketAddress(port);	
	BlockingQueue<Map<String, String> > blockingQueue = new LinkedBlockingQueue<Map<String, String> >();
	synchronized (lock)
	{
		sharedQueue = blockingQueue ;
		server = HttpServer.create(addr, 0);
		server.createContext("/", new HandlerMapParameter(blockingQueue));
		httpThreadPool = Executors.newCachedThreadPool() ;
		server.setExecutor(httpThreadPool);
		server.start();
	}
	return blockingQueue.poll(10 * 60, TimeUnit.SECONDS);
}
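Note that poll(10 * 60, TimeUnit.SECONDS) above returns null if nothing is offered within ten minutes, so callers of startAndWaitForData should be prepared for a null result. A tiny sketch of that contract:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedPollSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        // Nothing is ever offered, so poll waits for the timeout and then returns null.
        String result = queue.poll(100, TimeUnit.MILLISECONDS);
        System.out.println(result == null ? "timed out, no data" : result);
    }
}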
 
Example #11
Source File: GatewayImpl.java    From gemfirexd-oss with Apache License 2.0
/**
 * Initialize the Executor that handles listener events. Only used by
 * non-primary gateways
 */
private void initializeListenerExecutor()
{
  // Create the ThreadGroups
  final ThreadGroup loggerGroup = LogWriterImpl.createThreadGroup(
      "Gateway Listener Group", getLogger());

  // Create the Executor
  ThreadFactory tf = new ThreadFactory() {
      public Thread newThread(Runnable command) {
        Thread thread =  new Thread(loggerGroup, command, "Queued Gateway Listener Thread");
        thread.setDaemon(true);
        return thread;
      }
    };
  LinkedBlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>();
  this._executor = new ThreadPoolExecutor(1, 1/*max unused*/,
                                          120, TimeUnit.SECONDS, q, tf);
}
 
Example #12
Source File: TestLumberjackSocketChannelHandler.java    From localization_nifi with Apache License 2.0
@Before
public void setup() {
    eventFactory = new TestEventHolderFactory();
    channelHandlerFactory = new LumberjackSocketChannelHandlerFactory<>();

    byteBuffers = new LinkedBlockingQueue<>();
    byteBuffers.add(ByteBuffer.allocate(4096));

    events = new LinkedBlockingQueue<>();
    logger = Mockito.mock(ComponentLog.class);

    maxConnections = 1;
    sslContext = null;
    charset = StandardCharsets.UTF_8;

    dispatcher = new SocketChannelDispatcher<>(eventFactory, channelHandlerFactory, byteBuffers, events, logger,
            maxConnections, sslContext, charset);

}
 
Example #13
Source File: PeriodicNotificationApplicationFactory.java    From rya with Apache License 2.0
/**
 * Create a PeriodicNotificationApplication.
 * @param conf - Configuration object that specifies the parameters needed to create the application
 * @return PeriodicNotificationApplication to periodically poll Rya Fluo for new results
 * @throws PeriodicApplicationException
 */
public static PeriodicNotificationApplication getPeriodicApplication(final PeriodicNotificationApplicationConfiguration conf) throws PeriodicApplicationException {
    final Properties kafkaConsumerProps = getKafkaConsumerProperties(conf);
    final Properties kafkaProducerProps = getKafkaProducerProperties(conf);

    final BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
    final BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
    final BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();

    FluoClient fluo = null;
    try {
        final PeriodicQueryResultStorage storage = getPeriodicQueryResultStorage(conf);
        fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf);
        final NotificationCoordinatorExecutor coordinator = getCoordinator(conf.getCoordinatorThreads(), notifications);
        addRegisteredNotices(coordinator, fluo.newSnapshot());
        final KafkaExporterExecutor exporter = getExporter(conf.getExporterThreads(), kafkaProducerProps, bindingSets);
        final PeriodicQueryPrunerExecutor pruner = getPruner(storage, fluo, conf.getPrunerThreads(), bins);
        final NotificationProcessorExecutor processor = getProcessor(storage, notifications, bins, bindingSets, conf.getProcessorThreads());
        final KafkaNotificationProvider provider = getProvider(conf.getProducerThreads(), conf.getNotificationTopic(), coordinator, kafkaConsumerProps);
        return PeriodicNotificationApplication.builder().setCoordinator(coordinator).setProvider(provider).setExporter(exporter)
                .setProcessor(processor).setPruner(pruner).build();
    } catch (AccumuloException | AccumuloSecurityException e) {
        throw new PeriodicApplicationException(e.getMessage());
    }
}
 
Example #14
Source File: HttpPushFactoryTest.java    From ditto with Eclipse Public License 2.0
private void newBinding() {
    requestQueue = new LinkedBlockingQueue<>();
    responseQueue = new LinkedBlockingQueue<>();

    final Flow<HttpRequest, HttpResponse, NotUsed> handler =
            Flow.fromGraph(KillSwitches.<HttpRequest>single())
                    .mapAsync(1, request -> {
                        requestQueue.offer(request);
                        return responseQueue.take();
                    })
                    .mapMaterializedValue(killSwitch -> {
                        Objects.requireNonNull(killSwitchTrigger.peek())
                                .thenAccept(_void -> killSwitch.shutdown());
                        return NotUsed.getInstance();
                    });
    binding = Http.get(actorSystem).bindAndHandle(handler, ConnectHttp.toHost("127.0.0.1", 0), mat)
            .toCompletableFuture()
            .join();
}
 
Example #15
Source File: TestZKPlacementStateManager.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testWatchIndefinitely() throws Exception {
    TreeSet<ServerLoad> ownerships = new TreeSet<ServerLoad>();
    ownerships.add(new ServerLoad("server1"));
    final LinkedBlockingQueue<TreeSet<ServerLoad>> serverLoadNotifications =
        new LinkedBlockingQueue<TreeSet<ServerLoad>>();
    PlacementStateManager.PlacementCallback callback = new PlacementStateManager.PlacementCallback() {
        @Override
        public void callback(TreeSet<ServerLoad> serverLoads) {
            serverLoadNotifications.add(serverLoads);
        }
    };
    zkPlacementStateManager.saveOwnership(ownerships); // need to initialize the zk path before watching
    zkPlacementStateManager.watch(callback);
    // cannot verify the callback here, as it may fire before the verification runs

    zkPlacementStateManager.saveOwnership(ownerships);
    assertEquals(ownerships, waitForServerLoadsNotificationAsc(serverLoadNotifications, 1));

    ServerLoad server2 = new ServerLoad("server2");
    server2.addStream(new StreamLoad("hella-important-stream", 415));
    ownerships.add(server2);
    zkPlacementStateManager.saveOwnership(ownerships);
    assertEquals(ownerships, waitForServerLoadsNotificationAsc(serverLoadNotifications, 2));
}
 
Example #16
Source File: HandlerExecutor.java    From game-server with MIT License
/**
 * @param corePoolSize minimum number of threads, including idle threads
 * @param maxPoolSize maximum number of threads
 * @param keepAliveTime maximum time an excess idle thread waits for new work before terminating, once the pool exceeds the core size
 * @param cacheSize size of the execution queue
 * @param prefix name prefix for the thread pool
 */
public HandlerExecutor(int corePoolSize, int maxPoolSize, int keepAliveTime, int cacheSize, String prefix) {
	TimeUnit unit = TimeUnit.MINUTES;
	/**
	 * Task queue
	 */
	LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>();
	/**
	 * When the queue is full and cannot accept new tasks, discard them outright
	 */
	RejectedExecutionHandler handler = new ThreadPoolExecutor.DiscardPolicy();
	
	if (prefix == null) {
		prefix = "";
	}
	ThreadFactory threadFactory = new HandlerThreadFactory(prefix);
	pool = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
}
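Two details worth noting in this constructor: the cacheSize parameter is documented as the execution queue size but the LinkedBlockingQueue created here is unbounded, and with an unbounded queue the DiscardPolicy handler is only reached for tasks submitted after shutdown. A small sketch (not from game-server) of how DiscardPolicy behaves when it is paired with a genuinely bounded queue: overflow tasks are dropped silently instead of raising an exception.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DiscardPolicySketch {
    public static void main(String[] args) {
        // Bounded queue plus DiscardPolicy: tasks that arrive while the queue is full are dropped without an exception.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(2), new ThreadPoolExecutor.DiscardPolicy());
        for (int i = 0; i < 10; i++) {
            final int n = i;
            // Some of these submissions may be silently discarded if the single worker falls behind.
            pool.execute(() -> System.out.println("ran task " + n));
        }
        pool.shutdown();
    }
}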
 
Example #17
Source File: TestConnector.java    From microprofile-reactive-messaging with Apache License 2.0
@Override
public SubscriberBuilder<? extends Message<String>, Void> getSubscriberBuilder(Config config) {
    String channel = config.getValue(CHANNEL_NAME_ATTRIBUTE, String.class);
    LinkedBlockingQueue<Message<String>> queue = new LinkedBlockingQueue<>();
    outgoingQueues.put(channel, queue);
    return ReactiveStreams.<Message<String>>builder().forEach(queue::add);
}
 
Example #18
Source File: Signal.java    From cyclops with Apache License 2.0
/**
 * @return Signal backed by a queue
 */
public static <T> Signal<T> queueBackedSignal() {
    return new Signal<T>(
                         new Queue<T>(
                                      new LinkedBlockingQueue<T>(), null),
                         new Queue<T>(
                                      new LinkedBlockingQueue<T>(), null));
}
 
Example #19
Source File: AbstractTwillService.java    From twill with Apache License 2.0
@Override
protected final void startUp() throws Exception {
  // Single thread executor that silently discards tasks once it has terminated, which only
  // happens when this service is shutting down.
  messageCallbackExecutor = new ThreadPoolExecutor(1, 1,
                                                   0L, TimeUnit.MILLISECONDS,
                                                   new LinkedBlockingQueue<Runnable>(),
                                                   Threads.createDaemonThreadFactory("message-callback"),
                                                   new ThreadPoolExecutor.DiscardPolicy());

  // Watch for session expiration, recreate the live node if reconnected after expiration.
  watcherCancellable = zkClient.addConnectionWatcher(new Watcher() {
    private boolean expired = false;

    @Override
    public void process(WatchedEvent event) {
      if (event.getState() == Event.KeeperState.Expired) {
        LOG.warn("ZK Session expired for service {} with runId {}.", getServiceName(), runId.getId());
        expired = true;
      } else if (event.getState() == Event.KeeperState.SyncConnected && expired) {
        LOG.info("Reconnected after expiration for service {} with runId {}", getServiceName(), runId.getId());
        expired = false;
        logIfFailed(createLiveNode());
      }
    }
  });

  // Create the live node, if succeeded, start the service, otherwise fail out.
  createLiveNode().get();

  // Create node for messaging
  ZKOperations.ignoreError(zkClient.create(getZKPath("messages"), null, CreateMode.PERSISTENT),
                           KeeperException.NodeExistsException.class, null).get();

  doStart();

  // Starts watching for messages
  watchMessages();
}
 
Example #20
Source File: TCPClient.java    From warp10-platform with Apache License 2.0
TCPClient(Socket socket, Macro partitioner, LinkedBlockingQueue<List<Object>>[] queues, String charset) throws IOException {
  this.socket = socket;
  this.partitioner = partitioner;
  this.queues = queues;

  // TODO(tce): set socket timeout?

  remoteHost = this.socket.getInetAddress().getHostAddress();
  remotePort = this.socket.getPort();

  this.reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), charset));
  this.stack = new MemoryWarpScriptStack(AbstractWarp10Plugin.getExposedStoreClient(), AbstractWarp10Plugin.getExposedDirectoryClient(), new Properties());
  this.stack.setAttribute(WarpScriptStack.ATTRIBUTE_NAME, "[Warp10TCPPlugin " + socket.getLocalPort() + "]");
  stack.maxLimits();
}
 
Example #21
Source File: MoreExecutors.java    From slack-client with Apache License 2.0
private BlockingQueue<Runnable> getQueue() {
  if (queue != null) {
    return queue;
  } else if (unbounded) {
    return new SynchronousQueue<>(fair);
  } else if (queueSize.isPresent() && queueSize.get() == 0) {
    return new SynchronousQueue<>(fair);
  } else if (queueSize.isPresent()) {
    return new ArrayBlockingQueue<>(queueSize.get(), fair);
  } else {
    return new LinkedBlockingQueue<>();
  }
}
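The branches above trade buffering for direct hand-off: a SynchronousQueue holds no elements, so offer() fails unless a consumer thread is already waiting, while an unbounded LinkedBlockingQueue accepts every element. A minimal sketch of that difference (not part of slack-client):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;

public class HandoffVsBufferSketch {
    public static void main(String[] args) {
        SynchronousQueue<String> handoff = new SynchronousQueue<>();
        LinkedBlockingQueue<String> buffer = new LinkedBlockingQueue<>();

        // No consumer is waiting, so the synchronous hand-off is refused immediately.
        System.out.println("SynchronousQueue.offer: " + handoff.offer("task"));   // false
        // The unbounded queue simply buffers the element.
        System.out.println("LinkedBlockingQueue.offer: " + buffer.offer("task")); // true
    }
}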
 
Example #22
Source File: PartitionedUnorderedExecutorTest.java    From ehcache3 with Apache License 2.0
@Test
public void testRunningJobsAreInterruptedAfterShutdownNow() throws InterruptedException {
  final int jobCount = 4;

  BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  ExecutorService service = Executors.newCachedThreadPool();
  try {
    PartitionedUnorderedExecutor executor = new PartitionedUnorderedExecutor(queue, service, jobCount);

    final Semaphore jobSemaphore = new Semaphore(0);
    final Semaphore testSemaphore = new Semaphore(0);
    final AtomicInteger interrupted = new AtomicInteger();

    for (int i = 0; i < jobCount; i++) {
      executor.submit(() -> {
        testSemaphore.release();
        try {
          jobSemaphore.acquire();
        } catch (InterruptedException e) {
          interrupted.incrementAndGet();
        }
      });
    }

    testSemaphore.acquireUninterruptibly(jobCount);

    assertThat(executor.shutdownNow(), empty());
    assertThat(executor.awaitTermination(2, MINUTES), is(true));
    assertThat(executor.isShutdown(), is(true));
    assertThat(executor.isTerminated(), is(true));

    assertThat(jobSemaphore.availablePermits(), is(0));
    assertThat(interrupted.get(), is(jobCount));
  } finally {
    service.shutdown();
  }
}
 
Example #23
Source File: SimpleMonitorService.java    From dubbox with Apache License 2.0
public SimpleMonitorService() {
    queue = new LinkedBlockingQueue<URL>(Integer.parseInt(ConfigUtils.getProperty("dubbo.monitor.queue", "100000")));
    writeThread = new Thread(new Runnable() {
        public void run() {
            while (running) {
                try {
                    write(); // write the statistics log
                } catch (Throwable t) { // defensive fault tolerance
                    logger.error("Unexpected error occur at write stat log, cause: " + t.getMessage(), t);
                    try {
                        Thread.sleep(5000); // back off after a failure
                    } catch (Throwable t2) {
                    }
                }
            }
        }
    });
    writeThread.setDaemon(true);
    writeThread.setName("DubboMonitorAsyncWriteLogThread");
    writeThread.start();
    chartFuture = scheduledExecutorService.scheduleWithFixedDelay(new Runnable() {
        public void run() {
            try {
                draw(); // draw the statistics chart
            } catch (Throwable t) { // defensive fault tolerance
                logger.error("Unexpected error occur at draw stat chart, cause: " + t.getMessage(), t);
            }
        }
    }, 1, 300, TimeUnit.SECONDS);
    INSTANCE = this;
}
 
Example #24
Source File: ReadAheadCache.java    From distributedlog with Apache License 2.0
public ReadAheadCache(String streamName,
                      StatsLogger statsLogger,
                      AlertStatsLogger alertStatsLogger,
                      AsyncNotification notification,
                      int maxCachedRecords,
                      boolean deserializeRecordSet,
                      boolean traceDeliveryLatencyEnabled,
                      long deliveryLatencyWarnThresholdMillis,
                      Ticker ticker) {
    this.streamName = streamName;
    this.maxCachedRecords = maxCachedRecords;
    this.notification = notification;
    this.deserializeRecordSet = deserializeRecordSet;

    // create the readahead queue
    readAheadRecords = new LinkedBlockingQueue<LogRecordWithDLSN>();

    // start the idle reader detection
    lastEntryProcessTime = Stopwatch.createStarted(ticker);

    // Flags to control delivery latency tracing
    this.traceDeliveryLatencyEnabled = traceDeliveryLatencyEnabled;
    this.deliveryLatencyWarnThresholdMillis = deliveryLatencyWarnThresholdMillis;
    // Stats
    StatsLogger readAheadStatsLogger = statsLogger.scope("readahead");
    this.statsLogger = readAheadStatsLogger;
    this.alertStatsLogger = alertStatsLogger;
    this.readAheadDeliveryLatencyStat =
            readAheadStatsLogger.getOpStatsLogger("delivery_latency");
    this.negativeReadAheadDeliveryLatencyStat =
            readAheadStatsLogger.getOpStatsLogger("negative_delivery_latency");
}
 
Example #25
Source File: PartitionedUnorderedExecutorTest.java    From ehcache3 with Apache License 2.0
@Test
public void testRunningJobIsInterruptedAfterShutdownNow() throws InterruptedException {
  BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  ExecutorService service = Executors.newSingleThreadExecutor();
  try {
    PartitionedUnorderedExecutor executor = new PartitionedUnorderedExecutor(queue, service, 1);

    final Semaphore jobSemaphore = new Semaphore(0);
    final Semaphore testSemaphore = new Semaphore(0);
    final AtomicBoolean interrupted = new AtomicBoolean();

    executor.submit(() -> {
      testSemaphore.release();
      try {
        jobSemaphore.acquire();
      } catch (InterruptedException e) {
        interrupted.set(true);
      }
    });
    testSemaphore.acquireUninterruptibly();
    assertThat(executor.shutdownNow(), empty());
    assertThat(executor.awaitTermination(2, MINUTES), is(true));
    assertThat(executor.isShutdown(), is(true));
    assertThat(executor.isTerminated(), is(true));

    assertThat(jobSemaphore.availablePermits(), is(0));
    assertThat(interrupted.get(), is(true));
  } finally {
    service.shutdown();
  }
}
 
Example #26
Source File: BinlogWorker.java    From binlake with Apache License 2.0
/**
 * Open the valve so that dumping data can continue
 */
public void keepDump() {
    LinkedBlockingQueue<Object> thq = throttler;
    if (thq != null) {
        thq.offer(object);
    }
}
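keepDump uses offer rather than put: offer never blocks, and on a bounded queue it simply returns false when there is no room, which makes it safe to call from a thread that must not stall. A tiny sketch of the difference; the capacity of 1 is purely illustrative:

import java.util.concurrent.LinkedBlockingQueue;

public class OfferVsPutSketch {
    public static void main(String[] args) {
        LinkedBlockingQueue<Object> throttle = new LinkedBlockingQueue<>(1);
        System.out.println(throttle.offer(new Object())); // true, the queue is now full
        System.out.println(throttle.offer(new Object())); // false, returns immediately instead of blocking
        // throttle.put(new Object()) would block here until space frees up.
    }
}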
 
Example #27
Source File: ExecutorTaskAgent_Test.java    From goclipse with Eclipse Public License 1.0
protected Tests_ExecutorTaskAgent(LinkedBlockingQueue<Throwable> uncaughtExceptions, String name) {
	super("TestsExecutor." + name, (throwable) -> {
		if(throwable != null) {
			uncaughtExceptions.add(throwable);
		}			
	});
	this.uncaughtExceptions = uncaughtExceptions;
}
 
Example #28
Source File: LimitedThreadPool.java    From dubbo3 with Apache License 2.0
public Executor getExecutor(URL url) {
    String name = url.getParameter(Constants.THREAD_NAME_KEY, Constants.DEFAULT_THREAD_NAME);
    int cores = url.getParameter(Constants.CORE_THREADS_KEY, Constants.DEFAULT_CORE_THREADS);
    int threads = url.getParameter(Constants.THREADS_KEY, Constants.DEFAULT_THREADS);
    int queues = url.getParameter(Constants.QUEUES_KEY, Constants.DEFAULT_QUEUES);
    return new ThreadPoolExecutor(cores, threads, Long.MAX_VALUE, TimeUnit.MILLISECONDS, 
    		queues == 0 ? new SynchronousQueue<>() :
    			(queues < 0 ? new LinkedBlockingQueue<>()
    					: new LinkedBlockingQueue<>(queues)),
    		new NamedThreadFactory(name, true), new AbortPolicyWithReport(name, url));
}
 
Example #29
Source File: FifoMap.java    From jmeter-plugins with Apache License 2.0
private BlockingQueue<Object> getFifo(String fifoName) {
    if (super.containsKey(fifoName)) {
        return super.get(fifoName);
    } else {
        BlockingQueue<Object> fifo = new LinkedBlockingQueue<Object>(JMeterUtils.getPropDefault(FifoMap.CAPACITY_PROP, Integer.MAX_VALUE));
        super.put(fifoName, fifo);
        return fifo;
    }
}
 
Example #30
Source File: ChannelWriterOutputView.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates an new ChannelWriterOutputView that writes to the given channel and buffers data
 * in the given memory segments. If the given memory segments are null, the writer takes its buffers
 * directly from the return queue of the writer. Note that this variant blocks if no buffers are contained
 * in the return queue.
 * 
 * @param writer The writer to write to.
 * @param memory The memory used to buffer data, or null, to utilize solely the return queue.
 * @param segmentSize The size of the memory segments.
 */
public ChannelWriterOutputView(BlockChannelWriter<MemorySegment> writer, List<MemorySegment> memory, int segmentSize) {
	super(segmentSize, HEADER_LENGTH);
	
	if (writer == null) {
		throw new NullPointerException();
	}
	
	this.writer = writer;
	
	if (memory == null) {
		this.numSegments = 0;
	} else {
		this.numSegments = memory.size();
		// load the segments into the queue
		final LinkedBlockingQueue<MemorySegment> queue = writer.getReturnQueue();
		for (int i = memory.size() - 1; i >= 0; --i) {
			final MemorySegment seg = memory.get(i);
			if (seg.size() != segmentSize) {
				throw new IllegalArgumentException("The supplied memory segments are not of the specified size.");
			}
			queue.add(seg);
		}
	}
	
	// get the first segment
	try {
		advance();
	}
	catch (IOException ioex) {
		throw new RuntimeException("BUG: IOException occurred while getting first block for ChannelWriterOutputView.", ioex);
	}
}