Java Code Examples for java.util.concurrent.Executors#newFixedThreadPool()

The following examples show how to use java.util.concurrent.Executors#newFixedThreadPool(). They are taken from open-source projects; the original project, source file, and license are listed above each example.
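Before the project snippets, here is a minimal, self-contained sketch of the typical fixed-thread-pool lifecycle: create the pool, submit tasks, call shutdown(), then block with awaitTermination(). The pool size, task count, and task body are arbitrary placeholders chosen for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedThreadPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4); // 4 worker threads (arbitrary)
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            executor.submit(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }
        executor.shutdown();                                    // stop accepting new tasks
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            executor.shutdownNow();                             // interrupt tasks that are still running
        }
    }
}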
Example 1
Source File: ConcurrentRpcConnectionsTest.java    From p4ic4idea with Apache License 2.0
/**
 * Test concurrent RPC connections with the same user and frequent login/logout.
 */
@Test
public void testConcurrentRpcConnections() throws Exception {
  // Run concurrent reads and writes
  ExecutorService executor = Executors.newFixedThreadPool(50);
  for (int i = 0; i < 100; i++) {
    Runnable task = new GetChangelistsRunner(getIOptionsServer());
    executor.execute(task);
  }

  executor.shutdown();

  while (!executor.isTerminated()) {
    //System.out.println("Threads are still running...");
  }

  System.out.println("Finished all threads");
}
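A note that is not part of the original p4ic4idea source: the empty while (!executor.isTerminated()) loop above busy-spins a CPU core until the pool drains. ExecutorService.awaitTermination() blocks instead and is usually preferable; a minimal sketch of that alternative (the 60-second timeout is an arbitrary choice, and TimeUnit must be imported):

  executor.shutdown();                                      // no new tasks accepted
  if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {   // block until done or timed out
    executor.shutdownNow();                                 // interrupt tasks that are still running
  }
  System.out.println("Finished all threads");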
 
Example 2
Source File: HttpProxyCacheTest.java    From AndriodVideoCache with Apache License 2.0
@Test   // https://github.com/danikula/AndroidVideoCache/issues/43
public void testPreventClosingOriginalSourceForNewPartialRequestWithoutCache() throws Exception {
    HttpUrlSource source = new HttpUrlSource(HTTP_DATA_BIG_URL);
    FileCache fileCache = new FileCache(ProxyCacheTestUtils.newCacheFile());
    HttpProxyCache proxyCache = new HttpProxyCache(source, fileCache);
    ExecutorService executor = Executors.newFixedThreadPool(5);
    Future<Response> firstRequestFeature = processAsync(executor, proxyCache, "GET /" + HTTP_DATA_URL + " HTTP/1.1");
    Thread.sleep(100);  // wait for the first request to start processing

    int offset = 30000;
    String partialRequest = "GET /" + HTTP_DATA_URL + " HTTP/1.1\nRange: bytes=" + offset + "-";
    Future<Response> secondRequestFeature = processAsync(executor, proxyCache, partialRequest);

    Response secondResponse = secondRequestFeature.get();
    Response firstResponse = firstRequestFeature.get();

    byte[] responseData = loadAssetFile(ASSETS_DATA_BIG_NAME);
    assertThat(firstResponse.data).isEqualTo(responseData);

    byte[] partialData = new byte[responseData.length - offset];
    System.arraycopy(responseData, offset, partialData, 0, partialData.length);
    assertThat(secondResponse.data).isEqualTo(partialData);
}
 
Example 3
Source File: OnErrorTest.java    From scriptella-etl with Apache License 2.0
/**
 * BUG-193124 Error during script execution causes an infinite loop for an onerror handler with retry enabled
 */
public void testRetry() throws EtlExecutorException, InterruptedException {
  ExecutorService es = Executors.newFixedThreadPool(1);

  EtlExecutor etlExecutor = newEtlExecutor(getClass().getSimpleName()+"3.xml");
  es.submit((Runnable) etlExecutor);
  es.shutdown();
  es.awaitTermination(1, TimeUnit.SECONDS);
  if (!es.isTerminated()) {
    es.shutdownNow();
    fail(etlExecutor + " should be terminated, but is still running.");
  }
}
 
Example 4
Source File: ZkDistributed.java    From jlogstash-input-plugin with Apache License 2.0
private void initScheduledExecutorService() {
	executors = Executors.newFixedThreadPool(5);
	MasterCheck masterCheck = new MasterCheck(this);
	executors.submit(new HearBeat(this, this.localAddress));
	executors.submit(masterCheck);
	executors.submit(new HeartBeatCheck(this, masterCheck));
	executors.submit(new DownReblance(this, masterCheck));
	executors.submit(new UpReblance(this, masterCheck));
}
 
Example 5
Source File: MultiThreadedClientExample.java    From hbase with Apache License 2.0
public MultiThreadedClientExample() throws IOException {
  // Base number of threads.
  // This represents the number of threads your application has
  // that can be interacting with an hbase client.
  this.threads = Runtime.getRuntime().availableProcessors() * 4;

  // Daemon threads are great for things that get shut down.
  ThreadFactory threadFactory = new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat("internal-pol-%d").build();


  this.internalPool = Executors.newFixedThreadPool(threads, threadFactory);
}
 
Example 6
Source File: MapCacheProvider.java    From jeesuite-libs with Apache License 2.0
public static void main(String[] args) {
	final MapCacheProvider provider = new MapCacheProvider(100);
	
	provider.set("aa", "aa", 50);
	provider.set("bb", "bb", 30);
	System.out.println("cache size:" + provider.cache.size() + "-" + provider.currentCacheSize.get());
	provider.remove("aa");
	System.out.println("cache size:" + provider.cache.size() + "-" + provider.currentCacheSize.get());
	
	ExecutorService executorService = Executors.newFixedThreadPool(5);
	
	for (int i = 0; i < 500; i++) {
		int expire = RandomUtils.nextInt(1, 30);
		final String key = "key" + i + "_" + expire;
		executorService.submit(new Runnable() {
			@Override
			public void run() {
				provider.set(key, key, expire);
				try {
					Thread.sleep(TimeUnit.MILLISECONDS.toMillis(10));
				} catch (Exception e) {
				}
			}
		});
		
	}
	
	while(true){
		if(provider.cache.isEmpty())break;
	}
	
	provider.close();
	executorService.shutdown();
}
 
Example 7
Source File: MultiReceiveItemOnTest.java    From smallrye-mutiny with Apache License 2.0
@BeforeMethod
public void init() {
    executor = Executors.newFixedThreadPool(4, new ThreadFactory() {
        AtomicInteger count = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread thread = new Thread(r);
            thread.setName("test-" + count.incrementAndGet());
            return thread;
        }
    });
}
 
Example 8
Source File: EpollReuseAddrTest.java    From netty-4.1.22 with Apache License 2.0
@Test(timeout = 10000)
@Ignore // TODO: Unignore after making it pass on centos6-1 and debian7-1
public void testMultipleBindDatagramChannel() throws Exception {
    ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.ADVANCED);
    Assume.assumeTrue(versionEqOrGt(3, 9, 0));
    Bootstrap bootstrap = createBootstrap();
    bootstrap.option(EpollChannelOption.SO_REUSEPORT, true);
    final AtomicBoolean received1 = new AtomicBoolean();
    bootstrap.handler(new DatagramSocketTestHandler(received1));
    ChannelFuture future = bootstrap.bind().syncUninterruptibly();
    final InetSocketAddress address1 = (InetSocketAddress) future.channel().localAddress();

    final AtomicBoolean received2 = new AtomicBoolean();
    bootstrap.handler(new DatagramSocketTestHandler(received2));
    ChannelFuture future2 = bootstrap.bind(address1).syncUninterruptibly();
    final InetSocketAddress address2 = (InetSocketAddress) future2.channel().localAddress();

    Assert.assertEquals(address1, address2);
    final byte[] bytes = "data".getBytes();

    // fire up 16 Threads and send DatagramPackets to make sure we stress it enough to see DatagramPackets received
    // on both sockets.
    int count = 16;
    final CountDownLatch latch = new CountDownLatch(count);
    Runnable r = new Runnable() {
        @Override
        public void run() {
            try {
                DatagramSocket socket = new DatagramSocket();
                while (!received1.get() || !received2.get()) {
                    socket.send(new DatagramPacket(
                            bytes, 0, bytes.length, address1.getAddress(), address1.getPort()));
                }
                socket.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
            latch.countDown();
        }
    };

    ExecutorService executor = Executors.newFixedThreadPool(count);
    for (int i = 0 ; i < count; i++) {
        executor.execute(r);
    }
    latch.await();
    executor.shutdown();
    future.channel().close().syncUninterruptibly();
    future2.channel().close().syncUninterruptibly();
    Assert.assertTrue(received1.get());
    Assert.assertTrue(received2.get());
}
 
Example 9
Source File: Processor.java    From ServletContainer with GNU General Public License v3.0
public static void init() {
    executorService = Executors.newFixedThreadPool(PROCESSOR_POOL_SIZE);
}
 
Example 10
Source File: KafkaConsumerContainer.java    From DBus with Apache License 2.0
public void initThreadPool(int size) {
    int poolSize = size + 10;
    es = Executors.newFixedThreadPool(poolSize);
    LoggerFactory.getLogger().info("[kafka-consumer-container] initThreadPool size = " + poolSize);
}
 
Example 11
Source File: TestRuntimeMultithread.java    From kripton with Apache License 2.0
/**
 * Test multithread writable.
 *
 * @throws InterruptedException the interrupted exception
 */
@Test
public void testMultithreadWritable() throws InterruptedException {		
	
	ExecutorService executor = Executors.newFixedThreadPool(5);

	for (int c = 0; c < 5; c++) {
		final int start = c * 10;
		final int threadId = c;
		Runnable worker = new Runnable() {

			@Override
			public void run() {
				int id = threadId;
				Logger.info("Start thread-" + id);
				try (BindPersonDataSource dataSource = BindPersonDataSource.open()) {
					PersonDAOImpl dao = dataSource.getPersonDAO();
					Person bean = new Person();

					for (int i = start; i < start + 10; i++) {
						bean.name = "name" + i;
						bean.surname = "surname" + i;
						dao.insertThread1(bean);
						try {
							Thread.sleep(50);
						} catch (InterruptedException e) {
							e.printStackTrace();
						}
					}
				}
				Logger.info("End thread-" + id);

			}
		};
		executor.execute(worker);
	}

	executor.shutdown();
	while (!executor.isTerminated()) {

	}

	Logger.info("Finished all thread!");

}
 
Example 12
Source File: ShardConsumerSubscriberTest.java    From amazon-kinesis-client with Apache License 2.0
@Test
public void restartAfterRequestTimerExpiresWhenInitialTaskExecutionIsRejected() throws Exception {

    executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
            .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build());

    ExecutorService failingService = spy(executorService);

    doAnswer(invocation -> directlyExecuteRunnable(invocation))
            .doThrow(new RejectedExecutionException())
            .doCallRealMethod()
            .when(failingService).execute(any());

    subscriber = new ShardConsumerSubscriber(recordsPublisher, failingService, bufferSize, shardConsumer, 0);
    addUniqueItem(1);

    List<ProcessRecordsInput> received = new ArrayList<>();
    doAnswer(a -> {
        ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class);
        received.add(input);
        if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) {
            synchronized (processedNotifier) {
                processedNotifier.notifyAll();
            }
        }
        return null;
    }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class));

    // First try to start subscriptions.
    synchronized (processedNotifier) {
        subscriber.startSubscriptions();
    }

    // Verifying that there are no interactions with shardConsumer mock indicating no records were sent back and
    // subscription has not started correctly.
    verify(shardConsumer, never()).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class));

    Stream.iterate(2, i -> i + 1).limit(98).forEach(this::addUniqueItem);

    addTerminalMarker(2);

    // Doing the health check to allow the subscription to restart.
    assertThat(subscriber.healthCheck(1), nullValue());

    // Allow time for processing of the records to end in the executor thread, which calls notifyAll when it gets the
    // terminal record. The timeout is kept fairly high to avoid test failures on slow machines.
    synchronized (processedNotifier) {
        processedNotifier.wait(1000);
    }

    // Verify that shardConsumer mock was called 100 times and all 100 input records are processed.
    verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class));

    // Verify that received records in the subscriber are equal to the ones sent by the record publisher.
    assertThat(received.size(), equalTo(recordsPublisher.responses.size()));
    Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i),
            eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput())));

}
 
Example 13
Source File: CompletableSearchServer.java    From vind with Apache License 2.0
public CompletableSearchServer(SearchServer backend) {
    this(backend, Executors.newFixedThreadPool(SearchConfiguration.get(SearchConfiguration.APPLICATION_EXECUTOR_THREADS,16)), true);
}
 
Example 14
Source File: AppConfig.java    From rabbitmq-operator with Apache License 2.0
@Bean
@Qualifier("STANDARD_EXECUTOR")
public ExecutorService executorService() {
    return Executors.newFixedThreadPool(RECONCILIATION_THREAD_POOL_SIZE);
}
 
Example 15
Source File: ExecutionCounterTest.java    From cloudstack with Apache License 2.0
@Test
public void testConcurrentUpdatesToCounter() throws Exception {
    final ExecutionCounter executionCounter = new ExecutionCounter(0);
    final ExecutorService executorService = Executors.newFixedThreadPool(3);
    final AtomicInteger counterTask1 = new AtomicInteger(-1);
    final AtomicInteger counterTask2 = new AtomicInteger(-1);
    final AtomicInteger counterTask3 = new AtomicInteger(-1);

    final Runnable task1 = new Runnable() {
        @Override
        public void run() {
            executionCounter.incrementExecutionCounter().incrementExecutionCounter();
            executionCounter.incrementExecutionCounter().incrementExecutionCounter();
            counterTask1.set(executionCounter.getValue());
        }
    };
    final Runnable task2 = new Runnable() {
        @Override
        public void run() {
            executionCounter.incrementExecutionCounter().incrementExecutionCounter();
            counterTask2.set(executionCounter.getValue());
        }
    };
    final Runnable task3 = new Runnable() {
        @Override
        public void run() {
            counterTask3.set(executionCounter.getValue());
        }
    };

    executorService.execute(task1);
    executorService.execute(task2);
    executorService.execute(task3);

    executorService.shutdown();
    executorService.awaitTermination(5L, TimeUnit.SECONDS);

    assertThat(counterTask1.get(), equalTo(4));
    assertThat(counterTask2.get(), equalTo(2));
    assertThat(counterTask3.get(), equalTo(0));
}
 
Example 16
Source File: ThreadedRunnable.java    From hortonmachine with GNU General Public License v3.0
public ThreadedRunnable( int numThreads, IHMProgressMonitor pm ) {
    this.pm = pm;
    fixedThreadPool = Executors.newFixedThreadPool(numThreads);
}
 
Example 17
Source File: AsyncConfig.java    From java-examples with MIT License
@Override
public Executor getAsyncExecutor() {
    return Executors.newFixedThreadPool(threadsCount);
}
 
Example 18
Source File: SharedCounterExample.java    From ZKRecipesByExample with Apache License 2.0
public static void main(String[] args) throws IOException, Exception {
	final Random rand = new Random();
	SharedCounterExample example = new SharedCounterExample();
	try (TestingServer server = new TestingServer()) {
		CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
		client.start();
		
		SharedCount baseCount = new SharedCount(client, PATH, 0);
		baseCount.addListener(example);
		baseCount.start();
		
		List<SharedCount> examples = Lists.newArrayList();
		ExecutorService service = Executors.newFixedThreadPool(QTY);
		for (int i = 0; i < QTY; ++i) {
			final SharedCount count = new SharedCount(client, PATH, 0);
			examples.add(count);
			Callable<Void> task = new Callable<Void>() {
				@Override
				public Void call() throws Exception {
					count.start();
					Thread.sleep(rand.nextInt(10000));
					System.out.println("Increment:" + count.trySetCount(count.getVersionedValue(), count.getCount() + rand.nextInt(10)));
					return null;
				}
			};
			service.submit(task);
		}
		
		
		
		service.shutdown();
		service.awaitTermination(10, TimeUnit.MINUTES);
		
		for (int i = 0; i < QTY; ++i) {
			examples.get(i).close();
		}
		baseCount.close();
	}


}
 
Example 19
Source File: ArchiveAnalysisManager.java    From steady with Apache License 2.0
/**
 * <p>Constructor for ArchiveAnalysisManager.</p>
 *
 * @param _pool_size the number of parallel analysis threads
 * @param _timeout the timeout in milliseconds to wait for the completion of all analysis tasks (-1 means no timeout)
 * @param _instr whether or not the Java archives shall be instrumented
 * @param _ctx the application context in which the analysis takes place (if any)
 */
public ArchiveAnalysisManager(int _pool_size, long _timeout, boolean _instr, Application _ctx) {
	this.pool = Executors.newFixedThreadPool(_pool_size);
	this.analysisTimeout = _timeout;
	this.instrument = _instr;
	this.ctx = _ctx;
}
 
Example 20
Source File: BackgroundInitializer.java    From astor with GNU General Public License v2.0
/**
 * Creates the {@code ExecutorService} to be used. This method is called if
 * no {@code ExecutorService} was provided at construction time.
 *
 * @return the {@code ExecutorService} to be used
 */
private ExecutorService createExecutor() {
    return Executors.newFixedThreadPool(getTaskCount());
}