Java Code Examples for java.util.concurrent.CyclicBarrier#await()

The following examples show how to use java.util.concurrent.CyclicBarrier#await(). Each example notes the project and source file it was taken from, so you can look up the surrounding code and related API usage in the original repository.
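Before the project examples, here is a minimal, self-contained sketch of the basic await() rendezvous (illustrative only; the class and variable names are made up and it is not taken from any of the projects below). Two parties register with the barrier, neither call to await() returns until both threads have arrived, and the optional barrier action runs once per trip before the waiting threads are released.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class AwaitSketch {
    public static void main(String[] args) throws InterruptedException, BrokenBarrierException {
        // Two parties: the worker thread and the main thread. The barrier action
        // runs once, after both have arrived and before either is released.
        final CyclicBarrier barrier = new CyclicBarrier(2, () -> System.out.println("both parties arrived"));

        Thread worker = new Thread(() -> {
            try {
                barrier.await(); // blocks until the main thread also calls await()
                System.out.println("worker released");
            } catch (InterruptedException | BrokenBarrierException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        barrier.await(); // the second arrival trips the barrier and releases both threads
        worker.join();
    }
}
 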
Example 1
Source File: ResultSyncPointTest.java    From Smack with Apache License 2.0
@Test
public void testResultSyncPoint() throws Exception {
    final String result = "Hip Hip Hurrary!!111!";
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final ResultSyncPoint<String, Exception> rsp = new ResultSyncPoint<>();
    Async.go(new Async.ThrowingRunnable() {
        @Override
        public void runOrThrow() throws InterruptedException, BrokenBarrierException {
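            // Rendezvous with the test thread so the result is signalled only once it has also reached the barrier.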
            barrier.await();
            rsp.signal(result);
        }
    });
    barrier.await();
    String receivedResult = rsp.waitForResult(60 * 1000);
    assertEquals(result, receivedResult);
}
 
Example 2
Source File: TestJobImpl.java    From big-c with Apache License 2.0
@Test(timeout=20000)
public void testKilledDuringCommit() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  CyclicBarrier syncBarrier = new CyclicBarrier(2);
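  // The WaitingOutputCommitter below blocks on syncBarrier inside commitJob(), letting the test catch the job mid-commit.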
  OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();

  JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
  completeJobTasks(job);
  assertJobState(job, JobStateInternal.COMMITTING);

  syncBarrier.await();
  job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
  assertJobState(job, JobStateInternal.KILLED);
  dispatcher.stop();
  commitHandler.stop();
}
 
Example 3
Source File: CyclicBarrierTest.java    From j2objc with Apache License 2.0
/**
 * A reset of an active barrier causes waiting threads to throw
 * BrokenBarrierException
 */
public void testReset_BrokenBarrier() throws InterruptedException {
    final CyclicBarrier c = new CyclicBarrier(3);
    final CountDownLatch pleaseReset = new CountDownLatch(2);
    Thread t1 = new ThreadShouldThrow(BrokenBarrierException.class) {
        public void realRun() throws Exception {
            pleaseReset.countDown();
            c.await();
        }};
    Thread t2 = new ThreadShouldThrow(BrokenBarrierException.class) {
        public void realRun() throws Exception {
            pleaseReset.countDown();
            c.await();
        }};

    t1.start();
    t2.start();
    await(pleaseReset);

    awaitNumberWaiting(c, 2);
    c.reset();
    awaitTermination(t1);
    awaitTermination(t2);
}
 
Example 4
Source File: TransferQueueTest.java    From cyberduck with GNU General Public License v3.0
@Test
public void testConcurrent() throws Exception {
    final TransferQueue queue = new TransferQueue(1);
    final DownloadTransfer transfer = new DownloadTransfer(new Host(new TestProtocol()), new Path("/t", EnumSet.of(Path.Type.directory)), null);
    queue.add(transfer, new DisabledProgressListener());
    final AtomicBoolean added = new AtomicBoolean();
    final CyclicBarrier wait = new CyclicBarrier(2);
    new Thread(new Runnable() {
        @Override
        public void run() {
            queue.add(new DownloadTransfer(new Host(new TestProtocol()), new Path("/t", EnumSet.of(Path.Type.directory)), null), new DisabledProgressListener());
            added.set(true);
            try {
                wait.await();
            }
            catch(InterruptedException | BrokenBarrierException e) {
                fail();
            }
        }
    }).start();
    assertFalse(added.get());
    queue.remove(transfer);
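    // Removing the first transfer frees a slot; wait for the background thread to finish its add before asserting.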
    wait.await();
    assertTrue(added.get());
}
 
Example 5
Source File: PluggableJobTest.java    From google-cloud-eclipse with Apache License 2.0
@Test
public void testFutureCancelingCancelsJob() throws InterruptedException, BrokenBarrierException {
  final CyclicBarrier barrier = new CyclicBarrier(2);
  PluggableJob<Object> job = new PluggableJob<>("name", () -> {
    try {
      barrier.await(); // job started: should release main thread
      barrier.await(); // wait for future cancel
    } catch (InterruptedException | BrokenBarrierException ex) {
    }
    return barrier;
  });
  job.schedule();
  barrier.await(); // wait until job started
  assertEquals("Should be RUNNING", Job.RUNNING, job.getState());
  job.getFuture().cancel(true);
  barrier.await(); // job should now finish but report as cancelled
  job.join();
  assertNotNull("Job should be finished", job.getResult());
  assertEquals("Should be CANCEL", IStatus.CANCEL, job.getResult().getSeverity());
}
 
Example 6
Source File: JaxwsClientCallbackTest.java    From cxf with Apache License 2.0
@Test
public void testHandleCancellationCallback() throws Exception {
    final CyclicBarrier barrier = new CyclicBarrier(2);
    schedule(barrier, () -> callback.cancel(true));
    barrier.await(5, TimeUnit.SECONDS);

    assertThrows(InterruptedException.class, () -> callback.get());
    assertThrows(InterruptedException.class, () -> callback.get(10, TimeUnit.MILLISECONDS));
    assertThat(callback.isCancelled(), equalTo(true));
    assertThat(callback.isDone(), equalTo(true));
}
 
Example 7
Source File: TestThreadCpuTimeEvent.java    From TencentKona-8 with GNU General Public License v2.0
static void testCompareWithMXBean() throws Throwable {
    Duration testRunTime = Duration.ofMillis(eventPeriodMillis * cpuConsumerRunFactor);
    CyclicBarrier barrier = new CyclicBarrier(2);
    CpuConsumingThread thread = new CpuConsumingThread(testRunTime, barrier);
    thread.start();

    List<RecordedEvent> beforeEvents = generateEvents(2, barrier);
    verifyPerThreadInvariant(beforeEvents, cpuConsumerThreadName);

    // Run a second single pass
    barrier.await();
    barrier.await();

    ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
    Duration cpuTime = Duration.ofNanos(bean.getThreadCpuTime(thread.getId()));
    Duration userTime = Duration.ofNanos(bean.getThreadUserTime(thread.getId()));

    // Check something that should hold even in the presence of unfortunate scheduling
    Asserts.assertGreaterThanOrEqual(cpuTime.toMillis(), eventPeriodMillis);
    Asserts.assertGreaterThanOrEqual(userTime.toMillis(), eventPeriodMillis);

    Duration systemTimeBefore = getAccumulatedTime(beforeEvents, cpuConsumerThreadName, "system");
    Duration userTimeBefore = getAccumulatedTime(beforeEvents, cpuConsumerThreadName, "user");
    Duration cpuTimeBefore = userTimeBefore.plus(systemTimeBefore);

    Asserts.assertLessThan(cpuTimeBefore, cpuTime);
    Asserts.assertLessThan(userTimeBefore, userTime);
    Asserts.assertGreaterThan(cpuTimeBefore, Duration.ZERO);

    thread.interrupt();
    thread.join();
}
 
Example 8
Source File: SerializationDeadlock.java    From dragonwell8_jdk with GNU General Public License v2.0
public static void main(final String[] args) throws Exception {
    // Test for Vector serialization deadlock
    final Vector<Object> v1 = new Vector<>();
    final Vector<Object> v2 = new Vector<>();
    final TestBarrier testStart = new TestBarrier(3);

    // Populate the vectors so that they refer to each other
    v1.add(testStart);
    v1.add(v2);
    v2.add(testStart);
    v2.add(v1);

    final CyclicBarrier testEnd = new CyclicBarrier(3);
    final TestThread t1 = new TestThread(v1, testEnd);
    final TestThread t2 = new TestThread(v2, testEnd);

    t1.start();
    t2.start();

    // Wait for both test threads to have initiated serialization
    // of the 'testStart' object (and hence of both 'v1' and 'v2')
    testStart.await();

    // Wait for both test threads to successfully finish serialization
    // of 'v1' and 'v2'.
    System.out.println("Waiting for Vector serialization to complete ...");
    System.out.println("(This test will hang if serialization deadlocks)");
    testEnd.await();
    System.out.println("Test PASSED: serialization completed successfully");

    TestThread.handleExceptions();
}
 
Example 9
Source File: TestThreadCpuTimeEvent.java    From TencentKona-8 with GNU General Public License v2.0
static void testEventAtThreadExit() throws Throwable {
    Recording recording = new Recording();

    recording.enable(EventNames.ThreadCPULoad).withPeriod(Duration.ofHours(10));
    recording.start();

    Duration testRunTime = Duration.ofMillis(eventPeriodMillis * cpuConsumerRunFactor);
    CyclicBarrier barrier = new CyclicBarrier(2);
    CpuConsumingThread thread = new CpuConsumingThread(testRunTime, barrier);

    // Run a single pass
    thread.start();
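    // Two rendezvous with CpuConsumingThread: the first releases it to do a work pass, the second waits for that pass to finish.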
    barrier.await();
    barrier.await();

    thread.interrupt();
    thread.join();

    recording.stop();

    List<RecordedEvent> events = Events.fromRecording(recording);
    verifyPerThreadInvariant(events, cpuConsumerThreadName);

    int exitingCount = 0;
    for (RecordedEvent event : events) {
        RecordedThread eventThread = event.getThread();
        if (eventThread.getJavaName().equals(cpuConsumerThreadName)) {
            exitingCount++;
        }
    }
    Asserts.assertEquals(exitingCount, 1);
}
 
Example 10
Source File: ThreadUtils.java    From groovy with Apache License 2.0
public static void await(CyclicBarrier barrier) {
    try {
        barrier.await();
    } catch (Exception e) {
        throw new Error(e);
    }
}
 
Example 11
Source File: MemoryConflictProvoker.java    From openjdk-jdk8u-backup with GNU General Public License v2.0
public MemoryConflictProvoker(Object monitor) {
    super(monitor);
    barrier = new CyclicBarrier(2);
    conflictingThread = () -> {
        try {
            barrier.await();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        for (int i = 0; i < MemoryConflictProvoker.INNER_ITERATIONS; i++) {
            MemoryConflictProvoker.field++;
        }
    };
}
 
Example 12
Source File: SimpleExecutor.java    From Oceanus with Apache License 2.0
private Integer doUpdate(ExecuteHandler<Integer> handler,
		Set<RouteTarget> targets, StatementContext context,
		ExecuteCallback callback) throws SQLException {
	Integer sumResult = 0;
	List<FutureUpdateExecuteCallback> futureHolders = new ArrayList<FutureUpdateExecuteCallback>();
	int i = 0;
	int n = (targets.size() + 1);
	CyclicBarrier barrier = new CyclicBarrier(n);
	boolean async = (targets.size() > 1);
	boolean autoClosed = (targets.size() > 1);

	for (RouteTarget target : targets) {
		FutureUpdateExecuteCallback futureCallback = this
				.createUpdateCallback(target.getBatchItem().getMatchTable()
						, barrier, callback, async, autoClosed);
		futureHolders.add(futureCallback);
		Integer result = handler.handle(target, context, futureCallback);
		if (result != null) {
			sumResult += result;
		}
	}
	if (async) {
		try {
			barrier.await();
			for (i = 0; i < futureHolders.size(); i++) {
				sumResult += (Integer) futureHolders.get(i).future.get();
			}
		} catch (BrokenBarrierException bbe) {
			logger.error("not enough available threads to execute task! check your thread pool size!", bbe);
			throw new ShardException("not enough available threads to execute task!", bbe);
		} catch (Exception e) {
			logger.error("async execute error! targets=" + targets, e);
			throw new ShardException("async execute error!", e);
		}

	}
	return sumResult;
}
 
Example 13
Source File: ConcurrentMapOpsTest.java    From gemfirexd-oss with Apache License 2.0
private static final void awaitBarrier(CyclicBarrier barrier) {
  try {
    barrier.await();
  } catch (Exception e) {
    fail("unexpected exception in barrier await", e);
  }
}
 
Example 14
Source File: TestClockSpec.java    From swim with Apache License 2.0
@Test
public void concurrentlyScheduleTimers() {
  final int threadCount = 8;
  final int timerCount = 1000;
  final TestClock clock = new TestClock(1, 2);
  final CyclicBarrier barrier = new CyclicBarrier(threadCount);
  final CountDownLatch shutdown = new CountDownLatch(threadCount);
  try {
    clock.start();
    for (int i = 0; i < threadCount; i += 1) {
      final Thread thread = new Thread() {
        @Override
        public void run() {
          try {
            final CountDownLatch fire = new CountDownLatch(timerCount);
            barrier.await();
            for (int j = 0; j < timerCount; j += 1) {
              final int k = j;
              // A 1ms timer deadline will usually round up to 2ms, which equals
              // one full revolution of the clock.  By continually setting timers
              // for several clock revolutions, it becomes highly likely that
              // we will add a timer to a clock phase while the clock thread
              // concurrently executes the same phase.
              clock.setTimer(1L, new AbstractTimer() {
                @Override
                public void runTimer() {
                  assertEquals(timerCount - fire.getCount(), k);
                  fire.countDown();
                }
              });
            }
            clock.await(fire, 5000);
            shutdown.countDown();
          } catch (Throwable error) {
            throw new TestException(error);
          }
        }
      };
      thread.start();
    }
    clock.await(shutdown, 5000);
  } finally {
    clock.stop();
  }
}
 
Example 15
Source File: TestContainerLauncherImpl.java    From hadoop with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
  LOG.info("STARTING testContainerCleaned");
  
  CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
  CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);

  AppContext mockContext = mock(AppContext.class);
  
  EventHandler mockEventHandler = mock(EventHandler.class);
  when(mockContext.getEventHandler()).thenReturn(mockEventHandler);

  ContainerManagementProtocolClient mockCM =
      new ContainerManagerForTest(startLaunchBarrier, completeLaunchBarrier);
  ContainerLauncherImplUnderTest ut =
      new ContainerLauncherImplUnderTest(mockContext, mockCM);
  
  Configuration conf = new Configuration();
  ut.init(conf);
  ut.start();
  try {
    ContainerId contId = makeContainerId(0l, 0, 0, 1);
    TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
    String cmAddress = "127.0.0.1:8000";
    StartContainersResponse startResp =
      recordFactory.newRecordInstance(StartContainersResponse.class);
    startResp.setAllServicesMetaData(serviceResponse);
    
   
    LOG.info("inserting launch event");
    ContainerRemoteLaunchEvent mockLaunchEvent = 
      mock(ContainerRemoteLaunchEvent.class);
    when(mockLaunchEvent.getType())
      .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
    when(mockLaunchEvent.getContainerID())
      .thenReturn(contId);
    when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
    when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
    when(mockLaunchEvent.getContainerToken()).thenReturn(
        createNewContainerToken(contId, cmAddress));
    ut.handle(mockLaunchEvent);
    
    startLaunchBarrier.await();
    
         
    LOG.info("inserting cleanup event");
    ContainerLauncherEvent mockCleanupEvent = 
      mock(ContainerLauncherEvent.class);
    when(mockCleanupEvent.getType())
      .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
    when(mockCleanupEvent.getContainerID())
      .thenReturn(contId);
    when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
    when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
    ut.handle(mockCleanupEvent);

    completeLaunchBarrier.await();
   
    ut.waitForPoolToIdle();
    
    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
    verify(mockEventHandler, atLeast(2)).handle(arg.capture());
    boolean containerCleaned = false;
    
    for (int i =0; i < arg.getAllValues().size(); i++) {
      LOG.info(arg.getAllValues().get(i).toString());
      Event currentEvent = arg.getAllValues().get(i);
      if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
        containerCleaned = true;
      }
    }
    assert(containerCleaned);
    
  } finally {
    ut.stop();
  }
}
 
Example 16
Source File: TestIndexWriterThreadsToSegments.java    From lucene-solr with Apache License 2.0
public void testSegmentCountOnFlushRandom() throws Exception {
  Directory dir = newFSDirectory(createTempDir());
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));

  // Never trigger flushes (so we only flush on getReader):
  iwc.setMaxBufferedDocs(100000000);
  iwc.setRAMBufferSizeMB(-1);

  // Never trigger merges (so we can simplistically count flushed segments):
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);

  final IndexWriter w = new IndexWriter(dir, iwc);

  // How many threads are indexing in the current cycle:
  final AtomicInteger indexingCount = new AtomicInteger();

  // How many threads we will use on each cycle:
  final AtomicInteger maxThreadCount = new AtomicInteger();

  CheckSegmentCount checker = new CheckSegmentCount(w, maxThreadCount, indexingCount);

  // We spin up 10 threads up front, but then in between flushes we limit how many can run on each iteration
  final int ITERS = TEST_NIGHTLY ? 300 : 10;
  Thread[] threads = new Thread[MAX_THREADS_AT_ONCE];

  // We use this to stop all threads once they've indexed their docs in the current iter, and pull a new NRT reader, and verify the
  // segment count:
  final CyclicBarrier barrier = new CyclicBarrier(MAX_THREADS_AT_ONCE, checker);
  
  for(int i=0;i<threads.length;i++) {
    threads[i] = new Thread() {
        @Override
        public void run() {
          try {
            for(int iter=0;iter<ITERS;iter++) {
              if (indexingCount.incrementAndGet() <= maxThreadCount.get()) {
                if (VERBOSE) {
                  System.out.println("TEST: " + Thread.currentThread().getName() + ": do index");
                }

                // We get to index on this cycle:
                Document doc = new Document();
                doc.add(new TextField("field", "here is some text that is a bit longer than normal trivial text", Field.Store.NO));
                for(int j=0;j<200;j++) {
                  w.addDocument(doc);
                }
              } else {
                // We lose: no indexing for us on this cycle
                if (VERBOSE) {
                  System.out.println("TEST: " + Thread.currentThread().getName() + ": don't index");
                }
              }
              barrier.await();
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
    threads[i].start();
  }

  for(Thread t : threads) {
    t.join();
  }

  IOUtils.close(checker, w, dir);
}
 
Example 17
Source File: AbstractByteBufTest.java    From netty4.0.27Learn with Apache License 2.0
private void testBytesInArrayMultipleThreads(final boolean slice) throws Exception {
    final byte[] bytes = new byte[8];
    random.nextBytes(bytes);

    final ByteBuf buffer = releaseLater(newBuffer(8));
    buffer.writeBytes(bytes);
    final AtomicReference<Throwable> cause = new AtomicReference<Throwable>();
    final CountDownLatch latch = new CountDownLatch(60000);
    final CyclicBarrier barrier = new CyclicBarrier(11);
    for (int i = 0; i < 10; i++) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                while (cause.get() == null && latch.getCount() > 0) {
                    ByteBuf buf;
                    if (slice) {
                        buf = buffer.slice();
                    } else {
                        buf = buffer.duplicate();
                    }

                    byte[] array = new byte[8];
                    buf.readBytes(array);

                    assertArrayEquals(bytes, array);

                    Arrays.fill(array, (byte) 0);
                    buf.getBytes(0, array);
                    assertArrayEquals(bytes, array);

                    latch.countDown();
                }
                try {
                    barrier.await();
                } catch (Exception e) {
                    // ignore
                }
            }
        }).start();
    }
    latch.await(10, TimeUnit.SECONDS);
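    // As the 11th party, wait for all 10 worker threads to reach the barrier before checking for failures.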
    barrier.await(5, TimeUnit.SECONDS);
    assertNull(cause.get());
}
 
Example 18
Source File: SimpleProducerConsumerTest.java    From pulsar with Apache License 2.0
@Test(dataProvider = "batch")
public void testConcurrentConsumerReceiveWhileReconnect(int batchMessageDelayMs) throws Exception {
    final int recvQueueSize = 100;
    final int numConsumersThreads = 10;

    String subName = UUID.randomUUID().toString();
    final Consumer<byte[]> consumer = pulsarClient.newConsumer()
            .topic("persistent://my-property/my-ns/my-topic7").subscriptionName(subName)
            .startMessageIdInclusive()
            .receiverQueueSize(recvQueueSize).subscribe();
    ExecutorService executor = Executors.newCachedThreadPool();

    final CyclicBarrier barrier = new CyclicBarrier(numConsumersThreads + 1);
    for (int i = 0; i < numConsumersThreads; i++) {
        executor.submit((Callable<Void>) () -> {
            barrier.await();
            consumer.receive();
            return null;
        });
    }

    barrier.await();
    // there will be 10 threads calling receive() on the same consumer, and they will block
    Thread.sleep(100);

    // we restart the broker to reconnect
    restartBroker();
    Thread.sleep(2000);

    // publish 100 messages so that the consumers blocked on receive() will now get the messages
    ProducerBuilder<byte[]> producerBuilder = pulsarClient.newProducer()
            .topic("persistent://my-property/my-ns/my-topic7");

    if (batchMessageDelayMs != 0) {
        producerBuilder.batchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerBuilder.batchingMaxMessages(5);
        producerBuilder.enableBatching(true);
    }
    Producer<byte[]> producer = producerBuilder.create();
    for (int i = 0; i < recvQueueSize; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    Thread.sleep(500);

    ConsumerImpl<byte[]> consumerImpl = (ConsumerImpl<byte[]>) consumer;
    // The available permits should be 10 and num messages in the queue should be 90
    Assert.assertEquals(consumerImpl.getAvailablePermits(), numConsumersThreads);
    Assert.assertEquals(consumerImpl.numMessagesInQueue(), recvQueueSize - numConsumersThreads);

    barrier.reset();
    for (int i = 0; i < numConsumersThreads; i++) {
        executor.submit((Callable<Void>) () -> {
            barrier.await();
            consumer.receive();
            return null;
        });
    }
    barrier.await();
    Thread.sleep(100);

    // The available permits should be 20 and num messages in the queue should be 80
    Assert.assertEquals(consumerImpl.getAvailablePermits(), numConsumersThreads * 2);
    Assert.assertEquals(consumerImpl.numMessagesInQueue(), recvQueueSize - (numConsumersThreads * 2));

    // clear the queue
    while (true) {
        Message<byte[]> msg = consumer.receive(1, TimeUnit.SECONDS);
        if (msg == null) {
            break;
        }
    }

    // The available permits should be 0 and num messages in the queue should be 0
    Assert.assertEquals(consumerImpl.getAvailablePermits(), 0);
    Assert.assertEquals(consumerImpl.numMessagesInQueue(), 0);

    barrier.reset();
    for (int i = 0; i < numConsumersThreads; i++) {
        executor.submit((Callable<Void>) () -> {
            barrier.await();
            consumer.receive();
            return null;
        });
    }
    barrier.await();
    // we again make 10 threads call receive() and get blocked
    Thread.sleep(100);

    restartBroker();
    Thread.sleep(2000);

    // The available permits should be 10 and num messages in the queue should be 90
    Assert.assertEquals(consumerImpl.getAvailablePermits(), numConsumersThreads);
    Assert.assertEquals(consumerImpl.numMessagesInQueue(), recvQueueSize - numConsumersThreads);
    consumer.close();
}
 
Example 19
Source File: DelayedClientTransportTest.java    From grpc-nebula-java with Apache License 2.0
@Test
public void reprocess_newStreamRacesWithReprocess() throws Exception {
  final CyclicBarrier barrier = new CyclicBarrier(2);
  // In both phases, we only expect the first pickSubchannel() call to block on the barrier.
  final AtomicBoolean nextPickShouldWait = new AtomicBoolean(true);
  ///////// Phase 1: reprocess() twice with the same picker
  SubchannelPicker picker = mock(SubchannelPicker.class);

  doAnswer(new Answer<PickResult>() {
      @Override
      @SuppressWarnings("CatchAndPrintStackTrace")
      public PickResult answer(InvocationOnMock invocation) throws Throwable {
        if (nextPickShouldWait.compareAndSet(true, false)) {
          try {
            barrier.await();
            return PickResult.withNoResult();
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
        return PickResult.withNoResult();
      }
  }).when(picker).pickSubchannel(any(PickSubchannelArgs.class));

  // Because there is no pending stream yet, it will do nothing but save the picker.
  delayedTransport.reprocess(picker);
  verify(picker, never()).pickSubchannel(any(PickSubchannelArgs.class));

  Thread sideThread = new Thread("sideThread") {
      @Override
      public void run() {
        // Will call pickSubchannel and wait on barrier
        delayedTransport.newStream(method, headers, callOptions);
      }
    };
  sideThread.start();

  PickSubchannelArgsImpl args = new PickSubchannelArgsImpl(method, headers, callOptions);
  PickSubchannelArgsImpl args2 = new PickSubchannelArgsImpl(method, headers2, callOptions);

  // Is called from sideThread
  verify(picker, timeout(5000)).pickSubchannel(args);

  // Because the stream has not been buffered (it's still stuck in newStream()), this will do
  // nothing but increment the picker version.
  delayedTransport.reprocess(picker);
  verify(picker).pickSubchannel(args);

  // Now let the stuck newStream() through
  barrier.await(5, TimeUnit.SECONDS);

  sideThread.join(5000);
  assertFalse("sideThread should've exited", sideThread.isAlive());
  // newStream() detects that there has been a new picker while it's stuck, thus will pick again.
  verify(picker, times(2)).pickSubchannel(args);

  barrier.reset();
  nextPickShouldWait.set(true);

  ////////// Phase 2: reprocess() with a different picker
  // Create the second stream
  Thread sideThread2 = new Thread("sideThread2") {
      @Override
      public void run() {
        // Will call pickSubchannel and wait on barrier
        delayedTransport.newStream(method, headers2, callOptions);
      }
    };
  sideThread2.start();
  // The second stream will see the first picker
  verify(picker, timeout(5000)).pickSubchannel(args2);
  // While the first stream won't use the first picker any more.
  verify(picker, times(2)).pickSubchannel(args);

  // Now use a different picker
  SubchannelPicker picker2 = mock(SubchannelPicker.class);
  when(picker2.pickSubchannel(any(PickSubchannelArgs.class)))
      .thenReturn(PickResult.withNoResult());
  delayedTransport.reprocess(picker2);
  // The pending first stream uses the new picker
  verify(picker2).pickSubchannel(args);
  // The second stream is still pending in creation, doesn't use the new picker.
  verify(picker2, never()).pickSubchannel(args2);

  // Now let the second stream finish creation
  barrier.await(5, TimeUnit.SECONDS);

  sideThread2.join(5000);
  assertFalse("sideThread2 should've exited", sideThread2.isAlive());
  // The second stream should see the new picker
  verify(picker2, timeout(5000)).pickSubchannel(args2);

  // Wrapping up
  verify(picker, times(2)).pickSubchannel(args);
  verify(picker).pickSubchannel(args2);
  verify(picker2).pickSubchannel(args);
  verify(picker2).pickSubchannel(args2);
}
 
Example 20
Source File: CheckpointFreeListTest.java    From ignite with Apache License 2.0
/**
 * Note: the test assumes that the PDS size doesn't change between the first checkpoint and the state after several node stops.
 * That is no longer true with free-list caching, since only the final free-list state is persisted on checkpoint.
 * Some changed but currently empty buckets are not persisted, so the PDS size is smaller after the first checkpoint.
 * The test therefore makes sense only with on-heap caching disabled.
 *
 * @throws Exception if the test fails.
 */
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_PAGES_LIST_DISABLE_ONHEAP_CACHING, value = "true")
public void testRestoreFreeListCorrectlyAfterRandomStop() throws Exception {
    IgniteEx ignite0 = startGrid(0);
    ignite0.cluster().active(true);

    Random random = new Random();

    List<T2<Integer, byte[]>> cachedEntry = new ArrayList<>();

    IgniteCache<Integer, Object> cache = ignite0.cache(CACHE_NAME);

    for (int j = 0; j < CACHE_SIZE; j++) {
        byte[] val = new byte[random.nextInt(SF.apply(3072))];

        cache.put(j, val);

        cachedEntry.add(new T2<>(j, val));
    }

    Collections.shuffle(cachedEntry);

    //Remove half of entries.
    Collection<T2<Integer, byte[]>> entriesToRemove = cachedEntry.stream()
        .limit(cachedEntry.size() / 2)
        .collect(Collectors.toCollection(ConcurrentLinkedQueue::new));

    entriesToRemove.forEach(t2 -> cache.remove(t2.get1()));

    //While entries are being removed, the free list grabs a lot of free pages for itself, so put/remove is repeated to let the number of free pages stabilize.
    entriesToRemove.forEach(t2 -> cache.put(t2.get1(), t2.get2()));

    entriesToRemove.forEach(t2 -> cache.remove(t2.get1()));

    forceCheckpoint();

    Path cacheFolder = Paths.get(U.defaultWorkDirectory(),
        DFLT_STORE_DIR,
        ignite0.name().replaceAll("\\.", "_"),
        CACHE_DIR_PREFIX + CACHE_NAME
    );

    Optional<Long> totalPartSizeBeforeStop = totalPartitionsSize(cacheFolder);

    CyclicBarrier nodeStartBarrier = new CyclicBarrier(2);

    int approximateIterationCount = SF.applyLB(10, 6);

    //Approximate number of entries to put per iteration.
    int iterationDataCount = entriesToRemove.size() / approximateIterationCount;

    startAsyncPutThread(entriesToRemove, nodeStartBarrier);

    //Stop the node several times while data is being put.
    while (true) {
        stopGrid(0, true);

        ignite0 = startGrid(0);

        ignite0.cluster().active(true);

        if (entriesToRemove.isEmpty())
            break;

        //Notify put thread that node successfully started.
        nodeStartBarrier.await();
        nodeStartBarrier.reset();

        int awaitSize = entriesToRemove.size() - iterationDataCount;

        waitForCondition(() -> entriesToRemove.size() < awaitSize || entriesToRemove.size() == 0, 20000);
    }

    forceCheckpoint();

    Optional<Long> totalPartSizeAfterRestore = totalPartitionsSize(cacheFolder);

    //Allow the size after the repeated put operations to be at most 15% (heuristic value) greater than before; it should not double on every restart.
    long correctedRestoreSize = totalPartSizeAfterRestore.get() - (long)(totalPartSizeBeforeStop.get() * 0.15);

    assertTrue("Size after repeated put operations should be not more than on 15% greater. " +
            "Size before = " + totalPartSizeBeforeStop.get() + ", Size after = " + totalPartSizeAfterRestore.get(),
        totalPartSizeBeforeStop.get() > correctedRestoreSize);
}