Java Code Examples for java.util.concurrent.LinkedBlockingQueue#take()

The following examples show how to use java.util.concurrent.LinkedBlockingQueue#take(). Each example comes from an open-source project; the source file and license are listed above each snippet.
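
Before the project examples, here is a minimal, self-contained sketch of the core behavior: take() blocks until an element is available and throws InterruptedException if the waiting thread is interrupted. The class name TakeDemo, the delay, and the string values are illustrative only and do not come from any of the projects below.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TakeDemo {
    public static void main(String[] args) throws InterruptedException {
        // A bounded queue with capacity 1, as several of the examples below use.
        final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(1);

        // Producer: makes a result available after a short delay.
        Thread producer = new Thread(() -> {
            try {
                TimeUnit.MILLISECONDS.sleep(500);
                queue.put("result");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // take() blocks the calling thread until the producer puts an element.
        String value = queue.take();
        System.out.println("Received: " + value);

        producer.join();
    }
}
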
Example 1
Source File: BookmarkWidgetService.java    From 365browser with Apache License 2.0
@BinderThread
private BookmarkFolder loadBookmarks(final BookmarkId folderId) {
    final LinkedBlockingQueue<BookmarkFolder> resultQueue = new LinkedBlockingQueue<>(1);
    // A reference to the BookmarkLoader is held on the binder thread to
    // prevent it from being garbage collected.
    final BookmarkLoader bookmarkLoader = new BookmarkLoader();
    ThreadUtils.runOnUiThread(new Runnable() {
        @Override
        public void run() {
            bookmarkLoader.initialize(mContext, folderId, new BookmarkLoaderCallback() {
                @Override
                public void onBookmarksLoaded(BookmarkFolder folder) {
                    resultQueue.add(folder);
                }
            });
        }
    });
    try {
        return resultQueue.take();
    } catch (InterruptedException e) {
        return null;
    }
}
 
Example 2
Source File: PublisherFailureTest.java    From async-google-pubsub-client with Apache License 2.0
@Test
public void testTimeout() throws InterruptedException, ExecutionException {
  final LinkedBlockingQueue<PubsubFuture<List<String>>> t1 = new LinkedBlockingQueue<>();
  final LinkedBlockingQueue<PubsubFuture<List<String>>> t2 = new LinkedBlockingQueue<>();
  topics.put("t1", t1);
  topics.put("t2", t2);

  final Message m1 = Message.of("1");
  final Message m2 = Message.of("2");

  // Fail the first request
  final CompletableFuture<String> f1 = publisher.publish("t1", m1);
  final PubsubFuture<List<String>> r1 = t1.take();
  r1.fail(new TimeoutException());

  // Verify that the second request goes through
  final CompletableFuture<String> f2 = publisher.publish("t2", m2);
  final PubsubFuture<List<String>> r2 = t2.take();
  r2.succeed(singletonList("id2"));
  final String id2 = f2.get();
  assertThat(id2, is(r2.get().get(0)));
}
 
Example 3
Source File: BookmarkWidgetService.java    From delion with Apache License 2.0
@BinderThread
private BookmarkFolder loadBookmarks(final BookmarkId folderId) {
    final LinkedBlockingQueue<BookmarkFolder> resultQueue = new LinkedBlockingQueue<>(1);
    ThreadUtils.runOnUiThread(new Runnable() {
        @Override
        public void run() {
            new BookmarkLoader(mContext, folderId, new BookmarkLoaderCallback() {
                @Override
                public void onBookmarksLoaded(BookmarkFolder folder) {
                    resultQueue.add(folder);
                }
            });
        }
    });
    try {
        return resultQueue.take();
    } catch (InterruptedException e) {
        return null;
    }
}
 
Example 4
Source File: BookmarkWidgetService.java    From AndroidChromium with Apache License 2.0
@BinderThread
private BookmarkFolder loadBookmarks(final BookmarkId folderId) {
    final LinkedBlockingQueue<BookmarkFolder> resultQueue = new LinkedBlockingQueue<>(1);
    // A reference to the BookmarkLoader is held on the binder thread to
    // prevent it from being garbage collected.
    final BookmarkLoader bookmarkLoader = new BookmarkLoader();
    ThreadUtils.runOnUiThread(new Runnable() {
        @Override
        public void run() {
            bookmarkLoader.initialize(mContext, folderId, new BookmarkLoaderCallback() {
                @Override
                public void onBookmarksLoaded(BookmarkFolder folder) {
                    resultQueue.add(folder);
                }
            });
        }
    });
    try {
        return resultQueue.take();
    } catch (InterruptedException e) {
        return null;
    }
}
 
Example 5
Source File: DefaultLocalQueue.java    From SeimiCrawler with Apache License 2.0
@Override
public Request bPop(String crawlerName) {
    try {
        LinkedBlockingQueue<Request> queue = getQueue(crawlerName);
        return queue.take();
    } catch (InterruptedException e) {
        logger.error(e.getMessage(),e);
    }
    return null;
}
 
Example 6
Source File: MailQueueContract.java    From james-project with Apache License 2.0
@Test
default void dequeueShouldNotReturnInProcessingEmails() throws Exception {
    enQueue(defaultMail()
        .name("name")
        .build());

    LinkedBlockingQueue<MailQueue.MailQueueItem> queue = new LinkedBlockingQueue<>(1);
    Flux.from(getMailQueue().deQueue()).subscribeOn(Schedulers.elastic()).subscribe(Throwing.consumer(queue::put));
    queue.take();

    assertThat(queue.poll(2, TimeUnit.SECONDS)).isNull();
}
 
Example 7
Source File: TestZKPlacementStateManager.java    From distributedlog with Apache License 2.0
private TreeSet<ServerLoad> waitForServerLoadsNotificationAsc(
    LinkedBlockingQueue<TreeSet<ServerLoad>> notificationQueue,
    int expectedNumServerLoads) throws InterruptedException {
    TreeSet<ServerLoad> notification = notificationQueue.take();
    assertNotNull(notification);
    while (notification.size() < expectedNumServerLoads) {
        notification = notificationQueue.take();
    }
    assertEquals(expectedNumServerLoads, notification.size());
    return notification;
}
 
Example 8
Source File: PublisherTest.java    From async-google-pubsub-client with Apache License 2.0
@Test
public void testSizeBoundedBatching() throws InterruptedException, ExecutionException {
  final LinkedBlockingQueue<Request> t = new LinkedBlockingQueue<>();
  topics.put("t", t);

  publisher = Publisher.builder()
      .project("test")
      .pubsub(pubsub)
      .batchSize(2)
      .maxLatencyMs(DAYS.toMillis(1))
      .build();

  final Message m1 = Message.of("1");
  final Message m2 = Message.of("2");

  // Publish a single message
  publisher.publish("t", m1);

  // Verify that the batch is not sent
  Thread.sleep(1000);
  verify(pubsub, never()).publish(anyString(), anyString(), anyListOf(Message.class));

  // Send one more message, completing the batch.
  publisher.publish("t", m2);

  // Check that the batch got sent.
  verify(pubsub, timeout(5000)).publish(anyString(), anyString(), anyListOf(Message.class));
  final Request request = t.take();
  assertThat(request.messages.size(), is(2));
}
 
Example 9
Source File: SmartContractSupportService.java    From julongchain with Apache License 2.0
/**
 * Invokes the smart contract.
 *
 * @param smartContractId      the smart contract ID
 * @param smartContractMessage the message to send
 */
public synchronized static SmartContractMessage invoke(
        String smartContractId, SmartContractMessage smartContractMessage) throws SmartContractException {
    logger.info("invoke " + smartContractId);

    // Change the message type to TRANSACTION
    SmartContractMessage message =
            SmartContractMessage.newBuilder()
                    .mergeFrom(smartContractMessage)
                    // .setType(SmartContractMessage.Type.TRANSACTION)
                    .build();

    updateSmartContractStatus(smartContractId, SMART_CONTRACT_STATUS_BUSY);
    String txId = smartContractMessage.getTxid();
    addTxId(txId, smartContractId);
    updateTxStatus(smartContractId, txId, TX_STATUS_START);

    // Save the response queue for this transaction
    LinkedBlockingQueue<SmartContractMessage> queue = new LinkedBlockingQueue<SmartContractMessage>();
    txIdAndQueueMap.put(txId + "-" + smartContractId, queue);
    send(smartContractId, message);

    SmartContractMessage receiveMessage = null;
    try {
        receiveMessage = queue.take();
    } catch (InterruptedException e) {
        logger.error(e.getMessage(), e);
        throw new SmartContractException(e.getMessage());
    }

    return receiveMessage;
}
 
Example 10
Source File: DistributedLogTool.java    From distributedlog with Apache License 2.0
private int bkRecovery(final BookKeeperAdmin bkAdmin, final LinkedBlockingQueue<Long> ledgers,
                       final Set<BookieSocketAddress> bookieAddrs,
                       final boolean dryrun, final boolean skipOpenLedgers)
        throws InterruptedException, BKException {
    final AtomicInteger numPendings = new AtomicInteger(ledgers.size());
    final ExecutorService executorService = Executors.newCachedThreadPool();
    final CountDownLatch doneLatch = new CountDownLatch(concurrency);
    Runnable r = new Runnable() {
        @Override
        public void run() {
            while (!ledgers.isEmpty()) {
                long lid = -1L;
                try {
                    lid = ledgers.take();
                    System.out.println("Recovering ledger " + lid);
                    bkAdmin.recoverBookieData(lid, bookieAddrs, dryrun, skipOpenLedgers);
                    System.out.println("Recovered ledger completed : " + lid + ", " + numPendings.decrementAndGet() + " left");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    doneLatch.countDown();
                    break;
                } catch (BKException ke) {
                    System.out.println("Recovered ledger failed : " + lid + ", rc = " + BKException.getMessage(ke.getCode()));
                }
            }
            doneLatch.countDown();
        }
    };
    for (int i = 0; i < concurrency; i++) {
        executorService.submit(r);
    }
    doneLatch.await();
    SchedulerUtils.shutdownScheduler(executorService, 2, TimeUnit.MINUTES);
    return 0;
}
 
Example 11
Source File: BeamFnLoggingServiceTest.java    From beam with Apache License 2.0
@Test
public void testServerCloseHangsUpClients() throws Exception {
  LinkedBlockingQueue<BeamFnApi.LogEntry> logs = new LinkedBlockingQueue<>();
  ExecutorService executorService = Executors.newCachedThreadPool();
  Collection<Future<Void>> futures = new ArrayList<>();
  try (BeamFnLoggingService service =
      new BeamFnLoggingService(
          findOpenPort(),
          logs::add,
          ServerStreamObserverFactory.fromOptions(PipelineOptionsFactory.create())::from,
          GrpcContextHeaderAccessorProvider.getHeaderAccessor())) {
    server = createServer(service, service.getApiServiceDescriptor());

    for (int i = 1; i <= 3; ++i) {
      long instructionId = i;
      futures.add(
          executorService.submit(
              () -> {
                CountDownLatch waitForServerHangup = new CountDownLatch(1);
                ManagedChannel channel =
                    InProcessChannelBuilder.forName(service.getApiServiceDescriptor().getUrl())
                        .build();
                StreamObserver<BeamFnApi.LogEntry.List> outboundObserver =
                    BeamFnLoggingGrpc.newStub(channel)
                        .logging(
                            TestStreams.withOnNext(BeamFnLoggingServiceTest::discardMessage)
                                .withOnCompleted(waitForServerHangup::countDown)
                                .build());
                outboundObserver.onNext(createLogsWithIds(instructionId));
                waitForServerHangup.await();
                return null;
              }));
    }
    // Wait till each client has sent their message showing that they have connected.
    for (int i = 1; i <= 3; ++i) {
      logs.take();
    }
    service.close();
    server.shutdownNow();
  }

  for (Future<Void> future : futures) {
    future.get();
  }
}
 
Example 12
Source File: IntegerPolynomial.java    From RipplePower with Apache License 2.0
/**
 * Multithreaded version of {@link #resultant()}.
 *
 * @return <code>(rho, res)</code> satisfying <code>res = rho*this + t*(x^n-1)</code> for some integer <code>t</code>.
 */
public Resultant resultantMultiThread()
{
    int N = coeffs.length;

    // upper bound for resultant(f, g) = ||f, 2||^deg(g) * ||g, 2||^deg(f) = squaresum(f)^(N/2) * 2^(deg(f)/2) because g(x)=x^N-1
    // see http://jondalon.mathematik.uni-osnabrueck.de/staff/phpages/brunsw/CompAlg.pdf chapter 3
    BigInteger max = squareSum().pow((N + 1) / 2);
    max = max.multiply(BigInteger.valueOf(2).pow((degree() + 1) / 2));
    BigInteger max2 = max.multiply(BigInteger.valueOf(2));

    // compute resultants modulo prime numbers
    BigInteger prime = BigInteger.valueOf(10000);
    BigInteger pProd = Constants.BIGINT_ONE;
    LinkedBlockingQueue<Future<ModularResultant>> resultantTasks = new LinkedBlockingQueue<Future<ModularResultant>>();
    Iterator<BigInteger> primes = BIGINT_PRIMES.iterator();
    ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    while (pProd.compareTo(max2) < 0)
    {
        if (primes.hasNext())
        {
            prime = primes.next();
        }
        else
        {
            prime = prime.nextProbablePrime();
        }
        Future<ModularResultant> task = executor.submit(new ModResultantTask(prime.intValue()));
        resultantTasks.add(task);
        pProd = pProd.multiply(prime);
    }

    // Combine modular resultants to obtain the resultant.
    // For efficiency, first combine all pairs of small resultants to bigger resultants,
    // then combine pairs of those, etc. until only one is left.
    ModularResultant overallResultant = null;
    while (!resultantTasks.isEmpty())
    {
        try
        {
            Future<ModularResultant> modRes1 = resultantTasks.take();
            Future<ModularResultant> modRes2 = resultantTasks.poll();
            if (modRes2 == null)
            {
                // modRes1 is the only one left
                overallResultant = modRes1.get();
                break;
            }
            Future<ModularResultant> newTask = executor.submit(new CombineTask(modRes1.get(), modRes2.get()));
            resultantTasks.add(newTask);
        }
        catch (Exception e)
        {
            throw new IllegalStateException(e.toString());
        }
    }
    executor.shutdown();
    BigInteger res = overallResultant.res;
    BigIntPolynomial rhoP = overallResultant.rho;

    BigInteger pProd2 = pProd.divide(BigInteger.valueOf(2));
    BigInteger pProd2n = pProd2.negate();

    if (res.compareTo(pProd2) > 0)
    {
        res = res.subtract(pProd);
    }
    if (res.compareTo(pProd2n) < 0)
    {
        res = res.add(pProd);
    }

    for (int i = 0; i < N; i++)
    {
        BigInteger c = rhoP.coeffs[i];
        if (c.compareTo(pProd2) > 0)
        {
            rhoP.coeffs[i] = c.subtract(pProd);
        }
        if (c.compareTo(pProd2n) < 0)
        {
            rhoP.coeffs[i] = c.add(pProd);
        }
    }

    return new Resultant(rhoP, res);
}
 
Example 13
Source File: IntegerPolynomial.java    From ripple-lib-java with ISC License
/**
 * Multithreaded version of {@link #resultant()}.
 *
 * @return <code>(rho, res)</code> satisfying <code>res = rho*this + t*(x^n-1)</code> for some integer <code>t</code>.
 */
public Resultant resultantMultiThread()
{
    int N = coeffs.length;

    // upper bound for resultant(f, g) = ||f, 2||^deg(g) * ||g, 2||^deg(f) = squaresum(f)^(N/2) * 2^(deg(f)/2) because g(x)=x^N-1
    // see http://jondalon.mathematik.uni-osnabrueck.de/staff/phpages/brunsw/CompAlg.pdf chapter 3
    BigInteger max = squareSum().pow((N + 1) / 2);
    max = max.multiply(BigInteger.valueOf(2).pow((degree() + 1) / 2));
    BigInteger max2 = max.multiply(BigInteger.valueOf(2));

    // compute resultants modulo prime numbers
    BigInteger prime = BigInteger.valueOf(10000);
    BigInteger pProd = Constants.BIGINT_ONE;
    LinkedBlockingQueue<Future<ModularResultant>> resultantTasks = new LinkedBlockingQueue<Future<ModularResultant>>();
    Iterator<BigInteger> primes = BIGINT_PRIMES.iterator();
    ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    while (pProd.compareTo(max2) < 0)
    {
        if (primes.hasNext())
        {
            prime = primes.next();
        }
        else
        {
            prime = prime.nextProbablePrime();
        }
        Future<ModularResultant> task = executor.submit(new ModResultantTask(prime.intValue()));
        resultantTasks.add(task);
        pProd = pProd.multiply(prime);
    }

    // Combine modular resultants to obtain the resultant.
    // For efficiency, first combine all pairs of small resultants to bigger resultants,
    // then combine pairs of those, etc. until only one is left.
    ModularResultant overallResultant = null;
    while (!resultantTasks.isEmpty())
    {
        try
        {
            Future<ModularResultant> modRes1 = resultantTasks.take();
            Future<ModularResultant> modRes2 = resultantTasks.poll();
            if (modRes2 == null)
            {
                // modRes1 is the only one left
                overallResultant = modRes1.get();
                break;
            }
            Future<ModularResultant> newTask = executor.submit(new CombineTask(modRes1.get(), modRes2.get()));
            resultantTasks.add(newTask);
        }
        catch (Exception e)
        {
            throw new IllegalStateException(e.toString());
        }
    }
    executor.shutdown();
    BigInteger res = overallResultant.res;
    BigIntPolynomial rhoP = overallResultant.rho;

    BigInteger pProd2 = pProd.divide(BigInteger.valueOf(2));
    BigInteger pProd2n = pProd2.negate();

    if (res.compareTo(pProd2) > 0)
    {
        res = res.subtract(pProd);
    }
    if (res.compareTo(pProd2n) < 0)
    {
        res = res.add(pProd);
    }

    for (int i = 0; i < N; i++)
    {
        BigInteger c = rhoP.coeffs[i];
        if (c.compareTo(pProd2) > 0)
        {
            rhoP.coeffs[i] = c.subtract(pProd);
        }
        if (c.compareTo(pProd2n) < 0)
        {
            rhoP.coeffs[i] = c.add(pProd);
        }
    }

    return new Resultant(rhoP, res);
}
 
Example 14
Source File: GrpcLoggingServiceTest.java    From beam with Apache License 2.0
@Test
public void testServerCloseHangsUpClients() throws Exception {
  LinkedBlockingQueue<LogEntry> logs = new LinkedBlockingQueue<>();
  ExecutorService executorService = Executors.newCachedThreadPool();
  Collection<Future<Void>> futures = new ArrayList<>();
  final GrpcLoggingService service =
      GrpcLoggingService.forWriter(new CollectionAppendingLogWriter(logs));
  try (GrpcFnServer<GrpcLoggingService> server =
      GrpcFnServer.allocatePortAndCreateFor(service, InProcessServerFactory.create())) {

    for (int i = 1; i <= 3; ++i) {
      final long instructionId = i;
      futures.add(
          executorService.submit(
              () -> {
                {
                  CountDownLatch waitForServerHangup = new CountDownLatch(1);
                  ManagedChannel channel =
                      InProcessChannelBuilder.forName(server.getApiServiceDescriptor().getUrl())
                          .build();
                  StreamObserver<LogEntry.List> outboundObserver =
                      BeamFnLoggingGrpc.newStub(channel)
                          .logging(
                              TestStreams.withOnNext(messageDiscarder)
                                  .withOnCompleted(new CountDown(waitForServerHangup))
                                  .build());
                  outboundObserver.onNext(createLogsWithIds(instructionId));
                  waitForServerHangup.await();
                  return null;
                }
              }));
    }
    // Wait till each client has sent their message showing that they have connected.
    for (int i = 1; i <= 3; ++i) {
      logs.take();
    }
  }
  for (Future<Void> future : futures) {
    future.get();
  }
}
 
Example 15
Source File: AutoScaleProcessorTest.java    From pravega with Apache License 2.0
@Test (timeout = 10000)
public void writerCreationTest() throws Exception {
    EventStreamClientFactory clientFactory = mock(EventStreamClientFactory.class);
    CompletableFuture<Void> createWriterLatch = new CompletableFuture<>();
    doAnswer(x -> {
        createWriterLatch.complete(null);
        throw new RuntimeException();
    }).when(clientFactory).createEventWriter(any(), any(), any());

    TestAutoScaleProcessor failingWriterProcessor = new TestAutoScaleProcessor(
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            clientFactory,
            executorService());
    String segmentStreamName = "scope/myStreamSegment/0.#epoch.0";
    failingWriterProcessor.notifyCreated(segmentStreamName);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    AtomicReference<EventStreamWriter<AutoScaleEvent>> w = new AtomicReference<>();

    AssertExtensions.assertThrows("Bootstrap should not be initiated until isInitializeStarted is true", 
            () -> failingWriterProcessor.bootstrapOnce(clientFactory, w),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);

    // Report, but since the cooldown time hasn't elapsed, no scale event should be attempted, so no writer should be initialized yet.
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    
    failingWriterProcessor.setTimeMillis(20 * 60000L);
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // the above should initiate the bootstrap.
    assertTrue(failingWriterProcessor.isInitializeStarted());

    // since we are throwing on writer creation, wait until the writer is invoked once at least
    createWriterLatch.join();

    // now close the processor. The writer future should get cancelled.
    failingWriterProcessor.close();
    assertTrue(failingWriterProcessor.getWriterFuture().isCancelled());

    // create new processor and let the writer get created 
    TestAutoScaleProcessor processor = new TestAutoScaleProcessor(
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            clientFactory,
            executorService());

    LinkedBlockingQueue<AutoScaleEvent> queue = new LinkedBlockingQueue<>();
    EventStreamWriter<AutoScaleEvent> writerMock = createWriter(queue::add);
    doAnswer(x -> writerMock).when(clientFactory).createEventWriter(any(), any(), any());

    processor.notifyCreated(segmentStreamName);
    
    // report a low rate to trigger a scale down 
    processor.setTimeMillis(21 * 60000L);
    processor.report(segmentStreamName, 10, 0L, 1.0, 1.0, 1.0, 1.0);
    assertTrue(processor.isInitializeStarted());

    AssertExtensions.assertEventuallyEquals(writerMock, () -> processor.getWriterFuture().join(), 10000L);
    AutoScaleEvent event = queue.take();
    assertEquals(event.getDirection(), AutoScaleEvent.DOWN);
    
    processor.close();
    
    // create third writer, this time supply the writer directly
    EventStreamWriter<AutoScaleEvent> writer = spy(createWriter(e -> { }));
    
    // verify that when writer is set, we are able to get the processor initialized
    TestAutoScaleProcessor processor2 = new TestAutoScaleProcessor(writer,
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            executorService());
    
    processor2.notifyCreated(segmentStreamName);
    assertFalse(processor2.isInitializeStarted());
    processor2.setTimeMillis(20 * 60000L);
    processor2.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // the above should create a writer future. 
    assertTrue(processor2.isInitializeStarted());

    assertTrue(Futures.isSuccessful(processor2.getWriterFuture()));
    processor2.close();
    verify(writer, times(1)).close();
}
 
Example 16
Source File: TCPManager.java    From warp10-platform with Apache License 2.0
private void initExecutors() {

    executors = new Thread[parallelism];

    for (int i = 0; i < parallelism; i++) {

      final MemoryWarpScriptStack stack = new MemoryWarpScriptStack(AbstractWarp10Plugin.getExposedStoreClient(), AbstractWarp10Plugin.getExposedDirectoryClient(), new Properties());
      stack.maxLimits();

      final LinkedBlockingQueue<List<Object>> queue = queues[Math.min(i, queues.length - 1)];

      executors[i] = new Thread() {
        @Override
        public void run() {
          while (true) {

            try {
              List<List<Object>> msgs = new ArrayList<List<Object>>();

              if (timeout > 0) {
                List<Object> msg = queue.poll(timeout, TimeUnit.MILLISECONDS);
                if (null != msg) {
                  msgs.add(msg);
                  queue.drainTo(msgs, maxMessages - 1);
                }
              } else {
                List<Object> msg = queue.take();
                msgs.add(msg);
                queue.drainTo(msgs, maxMessages - 1);
              }

              stack.clear();

              if (0 < msgs.size()) {
                stack.push(msgs);
              } else {
                stack.push(null);
              }

              stack.exec(macro);
            } catch (InterruptedException e) {
              return;
            } catch (WarpScriptStopException wsse) {
            } catch (Exception e) {
              e.printStackTrace();
            }
          }
        }
      };

      executors[i].setName("[TCP Executor on port " + port + " #" + i + "]");
      executors[i].setDaemon(true);
      executors[i].start();
    }
  }
 
Example 17
Source File: SuggestHttpWorker.java    From splicer with Apache License 2.0
@Override
public String call() throws Exception {
	LinkedBlockingQueue<String> TSDs;

	// TODO: have it implement its own RegionChecker to get HBase locality when looking for metric names.
	// For now, let's just have it pick a random host.
	String hostname = getRandomHost();
	TSDs = HttpWorker.TSDMap.get(hostname);

	if (TSDs == null) {
		LOG.error("We are not running TSDs on regionserver={}. Choosing a random host failed", hostname);
		return "{'error': 'Choice of hostname=" + hostname + " failed.'}";
	}

	String server = TSDs.take();
	String uri = "http://" + server + "/api/suggest?" + suggestQuery;

	CloseableHttpClient postman = HttpClientBuilder.create().build();
	try {
		HttpGet getRequest = new HttpGet(uri);

		LOG.info("Sending query=" + uri + " to TSD running on host=" + hostname);

		HttpResponse response = postman.execute(getRequest);

		if (response.getStatusLine().getStatusCode() != 200) {
			throw new RuntimeException("Failed : HTTP error code : "
					+ response.getStatusLine().getStatusCode());
		}

		List<String> dl = IOUtils.readLines(response.getEntity().getContent());
		String result = StringUtils.join(dl, "");
		LOG.info("Result={}", result);

		return result;
	} finally {
		IOUtils.closeQuietly(postman);

		TSDs.put(server);
		LOG.info("Returned {} into the available queue", server);
	}
}
 
Example 18
Source File: HttpWorker.java    From splicer with Apache License 2.0
@Override
public String call() throws Exception
{
	LOG.debug("Start time={}, End time={}", Const.tsFormat(query.startTime()),
			Const.tsFormat(query.endTime()));

	String metricName = query.getQueries().get(0).getMetric();
	String cacheResult = JedisClient.get().get(this.query.toString());
	if (cacheResult != null) {
		LOG.debug("Cache hit for start=" + query.startTime()
				+ ", end=" + query.endTime() + ", metric=" + metricName);
		return cacheResult;
	}

	String hostname = checker.getBestRegionHost(metricName,
			query.startTime() / 1000, query.endTime() / 1000);
	LOG.debug("Found region server hostname={} for metric={}", hostname, metricName);

	LinkedBlockingQueue<String> TSDs;
	if (hostname == null) {
		LOG.error("Could not find region server for metric={}", metricName);
		return "{'error': 'Could not find region server for metric=" + metricName + "'}";
	}

	TSDs = TSDMap.get(hostname);
	if (TSDs == null) {
		String host = select(); // randomly select a host (basic load balancing)
		TSDs = TSDMap.get(host);
		if (TSDs == null) {
			LOG.error("We are not running TSDs on regionserver={}. Fallback failed. Returning error", hostname);
			return "{'error': 'Fallback to hostname=" + hostname + " failed.'}";
		} else {
			LOG.info("Falling back to " + host + " for queries");
		}
	}

	String server = TSDs.take();
	String uri = "http://" + server + "/api/query/qexp/";

	CloseableHttpClient postman = HttpClientBuilder.create().build();
	try {

		HttpPost postRequest = new HttpPost(uri);

		StringEntity input = new StringEntity(JSON.serializeToString(query));
		input.setContentType("application/json");
		postRequest.setEntity(input);
		LOG.debug("Sending request to: {} for query {} ", uri, query);

		HttpResponse response = postman.execute(postRequest);

		if (response.getStatusLine().getStatusCode() != 200) {
			throw new RuntimeException("Failed : HTTP error code : "
					+ response.getStatusLine().getStatusCode());
		}

		List<String> dl = IOUtils.readLines(response.getEntity().getContent());
		String result = StringUtils.join(dl, "");
		LOG.debug("Result={}", result);
		if (isCacheable(query)) {
			JedisClient.get().put(this.query.toString(), result);
		}
		return result;
	} finally {
		IOUtils.closeQuietly(postman);

		TSDs.put(server);
		LOG.debug("Returned {} into the available queue", server);
	}
}
 
Example 19
Source File: AioClient.java    From talent-aio with GNU Lesser General Public License v2.1
/**
 * Starts the reconnection task.
 *
 * @author tanyaowu
 * Created: 2017-01-11 17:48:17
 *
 */
private void startReconnTask()
{
	final ReconnConf<SessionContext, P, R> reconnConf = clientGroupContext.getReconnConf();
	if (reconnConf == null || reconnConf.getInterval() <= 0)
	{
		return;
	}

	final String id = clientGroupContext.getId();
	Thread thread = new Thread(new Runnable()
	{
		@Override
		public void run()
		{
			while (!clientGroupContext.isStopped())
			{
				log.info("准备重连");
				LinkedBlockingQueue<ChannelContext<SessionContext, P, R>> queue = reconnConf.getQueue();
				ClientChannelContext<SessionContext, P, R> channelContext = null;
				try
				{
					channelContext = (ClientChannelContext<SessionContext, P, R>) queue.take();
				} catch (InterruptedException e1)
				{
					log.error(e1.toString(), e1);
				}
				if (channelContext == null)
				{
					continue;
					//						return;
				}

				if (channelContext.isRemoved()) // already removed: no need to reconnect
				{
					continue;
				}

				long currtime = SystemTimer.currentTimeMillis();
				long timeInReconnQueue = channelContext.getStat().getTimeInReconnQueue();
				long sleeptime = reconnConf.getInterval() - (currtime - timeInReconnQueue);
				log.info("sleeptime:{}, closetime:{}", sleeptime, timeInReconnQueue);
				if (sleeptime > 0)
				{
					try
					{
						Thread.sleep(sleeptime);
					} catch (InterruptedException e)
					{
						log.error(e.toString(), e);
					}
				}

				if (channelContext.isRemoved() || !channelContext.isClosed()) // already removed or already connected: no need to reconnect
				{
					continue;
				}
				ReconnRunnable<SessionContext, P, R> runnable = new ReconnRunnable<SessionContext, P, R>(channelContext, AioClient.this);
				reconnConf.getThreadPoolExecutor().execute(runnable);
			}
		}
	});
	thread.setName("t-aio-timer-reconnect" + id);
	thread.setDaemon(true);
	thread.start();

}
 
Example 20
Source File: TioClient.java    From t-io with Apache License 2.0
/**
 * Starts the reconnection task.
 *
 * @author tanyaowu
 *
 */
private void startReconnTask() {
	final ReconnConf reconnConf = clientTioConfig.getReconnConf();
	if (reconnConf == null || reconnConf.getInterval() <= 0) {
		return;
	}

	final String id = clientTioConfig.getId();
	Thread thread = new Thread(new Runnable() {
		@Override
		public void run() {
			while (!clientTioConfig.isStopped()) {
				log.error("closeds:{}, connections:{}", clientTioConfig.closeds.size(), clientTioConfig.connections.size());
				//log.info("准备重连");
				LinkedBlockingQueue<ChannelContext> queue = reconnConf.getQueue();
				ClientChannelContext channelContext = null;
				try {
					channelContext = (ClientChannelContext) queue.take();
				} catch (InterruptedException e1) {
					log.error(e1.toString(), e1);
				}
				if (channelContext == null) {
					continue;
					//						return;
				}

				if (channelContext.isRemoved) // already removed: no need to reconnect
				{
					continue;
				}

				SslFacadeContext sslFacadeContext = channelContext.sslFacadeContext;
				if (sslFacadeContext != null) {
					sslFacadeContext.setHandshakeCompleted(false);
				}

				long sleeptime = reconnConf.getInterval() - (SystemTimer.currTime - channelContext.stat.timeInReconnQueue);
				//log.info("sleeptime:{}, closetime:{}", sleeptime, timeInReconnQueue);
				if (sleeptime > 0) {
					try {
						Thread.sleep(sleeptime);
					} catch (InterruptedException e) {
						log.error(e.toString(), e);
					}
				}

				if (channelContext.isRemoved || !channelContext.isClosed) // already removed or already connected: no need to reconnect
				{
					continue;
				} else {
					ReconnRunnable runnable = channelContext.getReconnRunnable();
					if (runnable == null) {
						synchronized (channelContext) {
							runnable = channelContext.getReconnRunnable();
							if (runnable == null) {
								runnable = new ReconnRunnable(channelContext, TioClient.this, reconnConf.getThreadPoolExecutor());
								channelContext.setReconnRunnable(runnable);
							}
						}
					}
					runnable.execute();
					//						reconnConf.getThreadPoolExecutor().execute(runnable);
				}
			}
		}
	});
	thread.setName("tio-timer-reconnect-" + id);
	thread.setDaemon(true);
	thread.start();

}