java.util.concurrent.atomic.AtomicLong Java Examples

The following examples show how to use java.util.concurrent.atomic.AtomicLong. Each example is taken from an open-source project; the source file and license are listed above its code.
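
Before the project examples, here is a minimal self-contained sketch of the core AtomicLong operations that recur throughout them; the class name and values are purely illustrative.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(0L);

        counter.incrementAndGet();                  // atomically adds 1, value is now 1
        counter.addAndGet(5L);                      // atomically adds 5, value is now 6
        long previous = counter.getAndIncrement();  // returns 6, value becomes 7

        // Lock-free update loop: re-read and retry until compareAndSet succeeds.
        long current;
        do {
            current = counter.get();
        } while (!counter.compareAndSet(current, Math.max(current, 100L)));

        System.out.println(previous + " -> " + counter.get()); // prints "6 -> 100"
    }
}
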
Example #1
Source File: InstanceExchangeFilterFunctionsTest.java    From spring-boot-admin with Apache License 2.0
@Test
void should_retry_using_default() {
	InstanceExchangeFilterFunction filter = InstanceExchangeFilterFunctions.retry(1, emptyMap());

	ClientRequest request = ClientRequest.create(HttpMethod.GET, URI.create("/test")).build();
	ClientResponse response = ClientResponse.create(HttpStatus.OK).build();

	AtomicLong invocationCount = new AtomicLong(0L);
	ExchangeFunction exchange = (r) -> Mono.fromSupplier(() -> {
		if (invocationCount.getAndIncrement() == 0) {
			throw new IllegalStateException("Test");
		}
		return response;
	});

	StepVerifier.create(filter.filter(INSTANCE, request, exchange)).expectNext(response).verifyComplete();
	assertThat(invocationCount.get()).isEqualTo(2);
}
 
Example #2
Source File: ConnectionLimitHandler.java    From stratio-cassandra with Apache License 2.0
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception
{
    counter.decrementAndGet();
    InetAddress address = ((InetSocketAddress) ctx.channel().remoteAddress()).getAddress();

    AtomicLong count = connectionsPerClient.get(address);
    if (count != null)
    {
        if (count.decrementAndGet() <= 0)
        {
            connectionsPerClient.remove(address);
        }
    }
    ctx.fireChannelInactive();
}
 
Example #3
Source File: SessionMonitorMBeanImpl.java    From khan-session with GNU Lesser General Public License v2.1
/**
 * SessionMonitorMBeanImpl constructor.
 *
 * @throws javax.management.NotCompliantMBeanException
 */
public SessionMonitorMBeanImpl(KhanSessionManager sessionManager)
        throws NotCompliantMBeanException {
    super(SessionMonitorMBean.class);

    this.sessionManager = sessionManager;

    samplingManager = new SampledStatisticManager();

    sessionsCreatedStatistic = new SimpleCounterImpl();
    sessionsCreatedSampled = createSampledStatistic(sessionsCreatedStatistic);
    sessionsCreated = new AtomicLong();

    duplicatedLoginStatistic = new SimpleCounterImpl();
    duplicatedLoginSampled = createSampledStatistic(duplicatedLoginStatistic);
    duplicatedLogin = new AtomicLong();

    sessionsDestroyedStatistic = new SimpleCounterImpl();
    sessionsDestroyedSampled = createSampledStatistic(sessionsDestroyedStatistic);
    sessionsDestroyed = new AtomicLong();

    if (log.isDebugEnabled()) {
        log.debug("Session Monitor MBean Constructed");
    }
}
 
Example #4
Source File: PsiManagerImpl.java    From consulo with Apache License 2.0
public void dropResolveCacheRegularly(@Nonnull ProgressIndicator indicator) {
  indicator = ProgressWrapper.unwrap(indicator);
  if (indicator instanceof ProgressIndicatorEx) {
    ((ProgressIndicatorEx)indicator).addStateDelegate(new AbstractProgressIndicatorExBase() {
      private final AtomicLong lastClearedTimeStamp = new AtomicLong();

      @Override
      public void setFraction(double fraction) {
        long current = System.currentTimeMillis();
        long last = lastClearedTimeStamp.get();
        if (current - last >= 500 && lastClearedTimeStamp.compareAndSet(last, current)) {
          // fraction is changed when each file is processed =>
          // resolve caches used when searching in that file are likely no longer needed
          dropResolveCaches();
        }
      }
    });
  }
}
 
Example #5
Source File: HddsVolumeChecker.java    From hadoop-ozone with Apache License 2.0
/**
 * Check a single volume asynchronously, returning a {@link ListenableFuture}
 * that can be used to retrieve the final result.
 * <p>
 * If the volume cannot be referenced then it is already closed and
 * cannot be checked. No error is propagated to the callback.
 *
 * @param volume   the volume that is to be checked.
 * @param callback callback to be invoked when the volume check completes.
 * @return true if the check was scheduled and the callback will be invoked.
 * false otherwise.
 */
public boolean checkVolume(final HddsVolume volume, Callback callback) {
  if (volume == null) {
    LOG.debug("Cannot schedule check on null volume");
    return false;
  }

  Optional<ListenableFuture<VolumeCheckResult>> olf =
      delegateChecker.schedule(volume, null);
  if (olf.isPresent()) {
    numVolumeChecks.incrementAndGet();
    Futures.addCallback(olf.get(),
        new ResultHandler(volume, new HashSet<>(), new HashSet<>(),
            new AtomicLong(1), callback),
        checkVolumeResultHandlerExecutorService
    );
    return true;
  }
  return false;
}
 
Example #6
Source File: PLUG.java    From OSPREY3 with GNU General Public License v2.0
public void pruneTriples(PruningMatrix pmat, double tolerance, TaskExecutor tasks) {

		// count unpruned triples
		AtomicLong numTriples = new AtomicLong(0);
		pmat.forEachUnprunedTriple((pos1, rc1, pos2, rc2, pos3, rc3) -> {
			numTriples.incrementAndGet();
			return PruningMatrix.IteratorCommand.Continue;
		});
		Progress progress = new Progress(numTriples.get());

		// try to prune each triple
		pmat.forEachUnprunedTriple((pos1, rc1, pos2, rc2, pos3, rc3) -> {
			tasks.submit(
				() -> shouldPruneTuple(new RCTuple(pos1, rc1, pos2, rc2, pos3, rc3), tolerance),
				(shouldPrune) -> {
					if (shouldPrune) {
						pmat.pruneTriple(pos1, rc1, pos2, rc2, pos3, rc3);
					}
					progress.incrementProgress();
				}
			);
			return PruningMatrix.IteratorCommand.Continue;
		});

		tasks.waitForFinish();
	}
 
Example #7
Source File: QLPattern.java    From QLExpress with Apache License 2.0
public static QLMatchResult findMatchStatement(INodeTypeManager aManager, QLPatternNode pattern, List<? extends IDataNode> nodes, int point) throws Exception {
	AtomicLong maxMatchPoint = new AtomicLong();
	AtomicLong maxDeep = new AtomicLong(1);
	QLMatchResultCache resultCache = new QLMatchResultCache(5);
	ArrayListCache<QLMatchResultTree> arrayListCache = new ArrayListCache<QLMatchResultTree>(50);
	MatchParamsPack staticParams = new MatchParamsPack(aManager, nodes, maxDeep, maxMatchPoint, resultCache, arrayListCache);
	QLMatchResult result = findMatchStatementWithAddRootOptimizeStack(staticParams, pattern, point, true, 1);
	if (printStackDepth) {
		log.warn("recursion stack depth: " + maxDeep.longValue() + "  QLMatchResult reuses: " + resultCache.fetchCount
				+ "  QLMatchResult allocations: " + resultCache.newCount + "  ArrayList allocations: " + arrayListCache.newCount);
	}
	if (result == null || result.getMatchSize() == 0) {
		throw new QLCompileException("Program error: the input does not conform to the grammar, no matching syntax was found; longest match [0:" + (maxMatchPoint.longValue() - 1) + "]");
	} else if (result.getMatchSize() != 1) {
		throw new QLCompileException("Program error: the input does not conform to the grammar, exactly one root node is required: " + pattern + "; longest match [0:" + (maxMatchPoint.longValue() - 1) + "]");
	}
	return result;
}
 
Example #8
Source File: TfidfVectorizer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray transform(List<String> tokens) {
    INDArray ret = Nd4j.create(1, vocabCache.numWords());

    Map<String, AtomicLong> counts = new HashMap<>();
    for (String token : tokens) {
        if (!counts.containsKey(token))
            counts.put(token, new AtomicLong(0));

        counts.get(token).incrementAndGet();
    }

    for (int i = 0; i < tokens.size(); i++) {
        int idx = vocabCache.indexOf(tokens.get(i));
        if (idx >= 0) {
            double tf_idf = tfidfWord(tokens.get(i), counts.get(tokens.get(i)).longValue(), tokens.size());
            //log.info("TF-IDF for word: {} -> {} / {} => {}", tokens.get(i), counts.get(tokens.get(i)).longValue(), tokens.size(), tf_idf);
            ret.putScalar(idx, tf_idf);
        }
    }
    return ret;
}
 
Example #9
Source File: JournalPendingMessageTest.java    From activemq-artemis with Apache License 2.0
@Test
public void testScheduledStats() throws Exception {
   AtomicLong publishedMessageSize = new AtomicLong();

   Connection connection = cf.createConnection();
   connection.start();
   Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
   MessageProducer producer = session.createProducer(session.createQueue(defaultQueueName));
   producer.setDeliveryDelay(2000);
   producer.send(session.createTextMessage("test"));

   verifyPendingStats(defaultQueueName, 1, publishedMessageSize.get());
   verifyPendingDurableStats(defaultQueueName, 1, publishedMessageSize.get());
   verifyScheduledStats(defaultQueueName, 1, publishedMessageSize.get());

   consumeTestQueueMessages(1);

   verifyPendingStats(defaultQueueName, 0, 0);
   verifyPendingDurableStats(defaultQueueName, 0, 0);
   verifyScheduledStats(defaultQueueName, 0, 0);

   connection.close();
}
 
Example #10
Source File: BaseHandler.java    From bender with Apache License 2.0
private static void updateOldest(AtomicLong max, long time) {
  while (true) {
    long curMax = max.get();

    /*
     * With time smaller value is older
     */
    if (curMax <= time) {
      return;
    }

    if (max.compareAndSet(curMax, time)) {
      return;
    }
  }
}
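
The loop above is the classic compare-and-set retry pattern for maintaining an atomic minimum (the parameter is named max, but with timestamps the smaller value is the older one). As a hedged aside, not part of the Bender source, on Java 8+ the same effect can be written with AtomicLong.accumulateAndGet, which performs the retry internally:

import java.util.concurrent.atomic.AtomicLong;

class OldestTracker {
    private final AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);

    // Java 8+ alternative to the hand-rolled CAS loop above:
    // accumulateAndGet retries until the smaller of the two values is stored.
    void updateOldest(long time) {
        oldest.accumulateAndGet(time, Math::min);
    }

    long get() {
        return oldest.get();
    }
}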
 
Example #11
Source File: TestBlockRetainedSizeBreakdown.java    From presto with Apache License 2.0
private static void checkRetainedSize(Block block, boolean getRegionCreateNewObjects)
{
    AtomicLong objectSize = new AtomicLong();
    Object2LongOpenCustomHashMap<Object> trackedObjects = new Object2LongOpenCustomHashMap<>(new ObjectStrategy());

    BiConsumer<Object, Long> consumer = (object, size) -> {
        objectSize.addAndGet(size);
        trackedObjects.addTo(object, 1);
    };

    block.retainedBytesForEachPart(consumer);
    assertEquals(objectSize.get(), block.getRetainedSizeInBytes());

    Block copyBlock = block.getRegion(0, block.getPositionCount() / 2);
    copyBlock.retainedBytesForEachPart(consumer);
    assertEquals(objectSize.get(), block.getRetainedSizeInBytes() + copyBlock.getRetainedSizeInBytes());

    assertEquals(trackedObjects.getLong(block), 1);
    assertEquals(trackedObjects.getLong(copyBlock), 1);
    trackedObjects.remove(block);
    trackedObjects.remove(copyBlock);
    for (long value : trackedObjects.values()) {
        assertEquals(value, getRegionCreateNewObjects ? 1 : 2);
    }
}
 
Example #12
Source File: AtomicLongMap.java    From codebuff with BSD 2-Clause "Simplified" License
/**
 * Removes and returns the value associated with {@code key}. If {@code key} is not in the map,
 * this method has no effect and returns zero.
 */
@CanIgnoreReturnValue
public long remove(K key) {
  AtomicLong atomic = map.get(key);
  if (atomic == null) {
    return 0L;
  }

  while (true) {
    long oldValue = atomic.get();
    if (oldValue == 0L || atomic.compareAndSet(oldValue, 0L)) {
      // only remove after setting to zero, to avoid concurrent updates
      map.remove(key, atomic);
      // succeed even if the remove fails, since the value was already adjusted
      return oldValue;
    }
  }
}
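
For context, here is a short usage sketch of the AtomicLongMap API that this remove method belongs to; it is based on the standard Guava surface, and the keys and values are illustrative only.

import com.google.common.util.concurrent.AtomicLongMap;

public class AtomicLongMapDemo {
    public static void main(String[] args) {
        AtomicLongMap<String> hits = AtomicLongMap.create();

        hits.incrementAndGet("home");       // home -> 1
        hits.addAndGet("home", 2L);         // home -> 3
        hits.incrementAndGet("login");      // login -> 1

        long removed = hits.remove("home"); // returns 3 and drops the entry
        System.out.println(removed + ", remaining sum = " + hits.sum()); // 3, remaining sum = 1
    }
}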
 
Example #13
Source File: JournalPendingMessageTest.java    From activemq-artemis with Apache License 2.0
@Test
public void testQueueMessageSizeAfterConsumption() throws Exception {
   AtomicLong publishedMessageSize = new AtomicLong();

   publishTestQueueMessages(200, publishedMessageSize);
   verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
   verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());

   consumeTestQueueMessages(200);

   verifyPendingStats(defaultQueueName, 0, 0);
   verifyPendingDurableStats(defaultQueueName, 0, 0);
}
 
Example #14
Source File: DumbSorter.java    From antsdb with GNU Lesser General Public License v3.0
@Override
public Object run(VdmContext ctx, Parameters params, long pMaster) {
    AtomicLong counter = ctx.getCursorStats(makerId);
    MyCursor result = null;
    boolean success = false;
    try (Cursor cc = this.upstream.make(ctx, params, pMaster)) {
        result = new MyCursor(ctx.getHumpback(), getCursorMeta());
        Heap heap = result.getHeap();
        List<Item> items = new ArrayList<>();
        for (long pRecord = cc.next(); pRecord != 0; pRecord = cc.next()) {
            if (pRecord != 0) {
                Record.size(pRecord);
            }
            pRecord = Record.clone(heap, pRecord);
            Item item = new Item();
            item.pRecord = pRecord;
            item.key = getSortKey(ctx, heap, params, pRecord);
            items.add(item);
        }
        counter.addAndGet(items.size());
        MyComparator2 comp = new MyComparator2();
        comp.ctx = ctx;
        comp.heap = heap;
        comp.params = params;
        Collections.sort(items, comp);
        result.items = items;
        success = true;
        return result;
    }
    finally {
        if (!success && (result != null)) {
            _log.warn("unexpected close");
            result.close();
        }
    }
}
 
Example #15
Source File: MonoProcessorTest.java    From reactor-core with Apache License 2.0
@Test
public void fluxCancelledByMonoProcessor() {
	AtomicLong cancelCounter = new AtomicLong();
	Flux.range(1, 10)
	    .doOnCancel(cancelCounter::incrementAndGet)
	    .publishNext()
	    .subscribe();

	assertThat(cancelCounter.get()).isEqualTo(1);
}
 
Example #16
Source File: PubsubEventModule.java    From attic-aurora with Apache License 2.0
@Provides
@Singleton
SubscriberExceptionHandler provideSubscriberExceptionHandler(StatsProvider statsProvider) {
  final AtomicLong subscriberExceptions = statsProvider.makeCounter(EXCEPTIONS_STAT);
  return (exception, context) -> {
    subscriberExceptions.incrementAndGet();
    log.error(
        "Failed to dispatch event to " + context.getSubscriberMethod() + ": " + exception,
        exception);
  };
}
 
Example #17
Source File: DelayFaultTest.java    From servicecomb-java-chassis with Apache License 2.0
@Test
public void injectFaultVertxDelay() throws InterruptedException {
  ArchaiusUtils
      .setProperty("servicecomb.governance.Consumer._global.policy.fault.protocols.rest.delay.fixedDelay", "10");
  ArchaiusUtils
      .setProperty("servicecomb.governance.Consumer._global.policy.fault.protocols.rest.delay.percent", "100");

  assertEquals("10", DynamicProperty
      .getInstance("servicecomb.governance.Consumer._global.policy.fault.protocols.rest.delay.fixedDelay")
      .getString());
  assertEquals("100", DynamicProperty
      .getInstance("servicecomb.governance.Consumer._global.policy.fault.protocols.rest.delay.percent")
      .getString());

  DelayFault delayFault = new DelayFault();
  FaultParam faultParam = new FaultParam(1);
  Vertx vertx = VertxUtils.getOrCreateVertxByName("faultinjectionTest", null);
  faultParam.setVertx(vertx);

  Holder<String> resultHolder = new Holder<>();
  CountDownLatch latch = new CountDownLatch(1);
  delayFault.injectFault(invocation, faultParam, response -> {
    resultHolder.value = response.getResult();
    latch.countDown();
  });

  latch.await(10, TimeUnit.SECONDS);
  AtomicLong count = FaultInjectionUtil.getOperMetTotalReq("restMicroserviceQualifiedName12");
  assertEquals(1, count.get());
  assertEquals("success", resultHolder.value);
}
 
Example #18
Source File: SimpleProducer.java    From galaxy-sdk-java with Apache License 2.0
public SimpleProducer(TalosProducerConfig producerConfig, String topicName,
    int partitionId, MessageService.Iface messageClientMock,
    TopicService.Iface topicClient, AtomicLong requestId,
    ScheduleInfoCache scheduleInfoCacheMock) throws GalaxyTalosException, TException{
  Utils.checkTopicName(topicName);
  getTopicInfo(topicClient, topicName, partitionId);
  this.producerConfig = producerConfig;
  this.messageClient = messageClientMock;
  this.clientId = Utils.generateClientId(SimpleProducer.class.getSimpleName());
  this.requestId = requestId;
  this.scheduleInfoCache = scheduleInfoCacheMock;
}
 
Example #19
Source File: StatUtils.java    From Mycat-Balance with Apache License 2.0
/**
 * Record statistics for a channel.
 *
 * @author tanyaowu
 * @param channelContext
 * @param msgCount
 * @param submitCount
 * @param msgSize
 */
public static void recordStat(ChannelContext channelContext, AtomicLong msgCount, AtomicLong submitCount, AtomicLong msgSize)
{
    synchronized (mapOfsocketChannelIdAndMsgCount)
    {
        mapOfsocketChannelIdAndMsgCount.put(channelContext, msgCount);
        mapOfsocketChannelIdAndSubmitCount.put(channelContext, submitCount);
        mapOfsocketChannelIdAndMsgSize.put(channelContext, msgSize);
    }
}
 
Example #20
Source File: DemoTester.java    From message-queue-java with MIT License
public Producer(QueueStore queueStore, int number, long maxTimeStamp, int maxMsgNum, AtomicLong counter, ConcurrentMap<String, AtomicInteger> queueCounter) {
    this.counter = counter;
    this.maxMsgNum = maxMsgNum;
    this.queueCounter = queueCounter;
    this.number = number;
    this.queueStore = queueStore;
    this.maxTimeStamp = maxTimeStamp;
}
 
Example #21
Source File: OrderedConsumer.java    From spring-boot-starter-samples with Apache License 2.0
public static void main(String[] args) throws Exception {
	
	DefaultMQPushConsumer consumer = new DefaultMQPushConsumer("example_group_name");

	consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);

	consumer.subscribe("TopicTest", "TagA || TagC || TagD");

	consumer.registerMessageListener(new MessageListenerOrderly() {

		AtomicLong consumeTimes = new AtomicLong(0);

		@Override
		public ConsumeOrderlyStatus consumeMessage(List<MessageExt> msgs, ConsumeOrderlyContext context) {
			context.setAutoCommit(false);
			System.out.printf(Thread.currentThread().getName() + " Receive New Messages: " + msgs + "%n");
			this.consumeTimes.incrementAndGet();
			if ((this.consumeTimes.get() % 2) == 0) {
				return ConsumeOrderlyStatus.SUCCESS;
			} else if ((this.consumeTimes.get() % 3) == 0) {
				return ConsumeOrderlyStatus.ROLLBACK;
			} else if ((this.consumeTimes.get() % 4) == 0) {
				return ConsumeOrderlyStatus.COMMIT;
			} else if ((this.consumeTimes.get() % 5) == 0) {
				context.setSuspendCurrentQueueTimeMillis(3000);
				return ConsumeOrderlyStatus.SUSPEND_CURRENT_QUEUE_A_MOMENT;
			}
			return ConsumeOrderlyStatus.SUCCESS;

		}
	});

	consumer.start();

	System.out.printf("Consumer Started.%n");
}
 
Example #22
Source File: CoreOutputStream.java    From crail with Apache License 2.0
public CoreOutputStream(CoreNode file, long streamId, long writeHint) throws Exception {
	super(file, streamId, file.getCapacity());
	this.writeHint = Math.max(0, writeHint);
	this.inFlight = new AtomicLong(0);
	this.noOp = new CrailImmediateOperation(0);
	this.open = true;
	if (CrailConstants.DEBUG){
		LOG.info("CoreOutputStream, open, path " + file.getPath() + ", fd " + file.getFd() + ", streamId " + streamId + ", isDir " + file.getType().isDirectory() + ", writeHint " + this.writeHint);
	}
}
 
Example #23
Source File: TokenBucket.java    From Sentinel with Apache License 2.0
public TokenBucket(long maxTokens, long intervalMillis) {
    if (maxTokens <= 0) {
        throw new IllegalArgumentException("maxTokens should > 0, but given: " + maxTokens);
    }
    if (intervalMillis < 1000) {
        throw new IllegalArgumentException("intervalMillis should be at least 1000, but given: " + intervalMillis);
    }
    this.maxTokens = maxTokens;
    this.intervalMillis = intervalMillis;
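    // truncate to the current second boundary, then schedule the next update one interval later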
    this.nextUpdate = System.currentTimeMillis() / 1000 * 1000 + intervalMillis;
    this.tokens = new AtomicLong(maxTokens);
}
 
Example #24
Source File: FixedBoundaryVictoriaMetricsHistogram.java    From micrometer with Apache License 2.0
public FixedBoundaryVictoriaMetricsHistogram() {
    this.zeros = new AtomicLong(0);
    this.lower = new AtomicLong(0);
    this.upper = new AtomicLong(0);
    this.sum = new DoubleAdder();

    this.values = new AtomicReferenceArray<>(BUCKETS_COUNT);
}
 
Example #25
Source File: IteratorTest.java    From sofa-jraft with Apache License 2.0
@Before
public void setup() {
    this.applyingIndex = new AtomicLong(0);
    this.closures = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        this.closures.add(new MockClosure());
        final LogEntry log = new LogEntry(EnumOutter.EntryType.ENTRY_TYPE_DATA);
        log.getId().setIndex(i);
        log.getId().setTerm(1);
        log.setData(ByteBuffer.allocate(i));
        Mockito.when(this.logManager.getEntry(i)).thenReturn(log);
    }
    this.iterImpl = new IteratorImpl(fsm, logManager, closures, 0L, 0L, 10L, applyingIndex);
    this.iter = new IteratorWrapper(iterImpl);
}
 
Example #26
Source File: AtomicLongTest.java    From j2objc with Apache License 2.0
/**
 * compareAndSet in one thread enables another waiting for value
 * to succeed
 */
public void testCompareAndSetInMultipleThreads() throws Exception {
    final AtomicLong ai = new AtomicLong(1);
    Thread t = new Thread(new CheckedRunnable() {
        public void realRun() {
            while (!ai.compareAndSet(2, 3))
                Thread.yield();
        }});

    t.start();
    assertTrue(ai.compareAndSet(1, 2));
    t.join(LONG_DELAY_MS);
    assertFalse(t.isAlive());
    assertEquals(3, ai.get());
}
 
Example #27
Source File: SimpleMetricStore.java    From actframework with Apache License 2.0
private void countOnce_(String name) {
    AtomicLong al = counters.get(name);
    if (null == al) {
        AtomicLong newAl = new AtomicLong();
        al = counters.putIfAbsent(name, newAl);
        if (null == al) {
            al = newAl;
        }
    }
    al.incrementAndGet();
    name = getParent(name);
    if (S.notBlank(name)) {
        countOnce_(name);
    }
}
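
The allocate-then-putIfAbsent dance above works on any ConcurrentMap and predates Java 8. As a hedged sketch (not the actframework code), the per-name counter part can be written more compactly with computeIfAbsent:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class SimpleCounters {
    private final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

    // computeIfAbsent installs a new counter atomically when the name is absent,
    // replacing the explicit putIfAbsent race handling shown above.
    void countOnce(String name) {
        counters.computeIfAbsent(name, k -> new AtomicLong()).incrementAndGet();
    }

    long count(String name) {
        AtomicLong al = counters.get(name);
        return al == null ? 0L : al.get();
    }
}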
 
Example #28
Source File: InMemoryChunkSet.java    From warp10-platform with Apache License 2.0
/**
 * Optimize all non-current chunks by shrinking their buffers.
 *
 * @param out stream used to measure the allocated capacity of each chunk
 * @param now current time, used to identify (and skip) the current chunk
 * @param allocation counter incremented by the number of bytes re-allocated
 * @return the number of bytes reclaimed
 */
long optimize(CapacityExtractorOutputStream out, long now, AtomicLong allocation) {
  
  if (this.ephemeral) {
    return 0L;
  }
  
  int currentChunk = chunk(now);
  
  long reclaimed = 0L;

  synchronized(this.chunks) {      
    for (int i = 0; i < this.chunks.length; i++) {
      if (null == this.chunks[i] || i == currentChunk) {
        continue;
      }
      int size = this.chunks[i].size();
      
      try {
        this.chunks[i].writeTo(out);
        int capacity = out.getCapacity();
        
        if (capacity > size) {
          this.chunks[i].resize(size);
          allocation.addAndGet(size);
          reclaimed += (capacity - size);
        }          
      } catch (IOException ioe) {
        // ignore: if the chunk cannot be measured, keep it at its current capacity
      }
    }
  }
  
  return reclaimed;
}
 
Example #29
Source File: AtomicLongTest.java    From j2objc with Apache License 2.0
/**
 * addAndGet adds given value to current, and returns current value
 */
public void testAddAndGet() {
    AtomicLong ai = new AtomicLong(1);
    assertEquals(3, ai.addAndGet(2));
    assertEquals(3, ai.get());
    assertEquals(-1, ai.addAndGet(-4));
    assertEquals(-1, ai.get());
}
 
Example #30
Source File: ConcurrentLinkedHashMap.java    From concurrentlinkedhashmap with Apache License 2.0
/**
 * Records a read in the buffer and return its write count.
 *
 * @param bufferIndex the index to the chosen read buffer
 * @param node the entry in the page replacement policy
 * @return the number of writes on the chosen read buffer
 */
long recordRead(int bufferIndex, Node<K, V> node) {
  // The location in the buffer is chosen in a racy fashion as the increment
  // is not atomic with the insertion. This means that concurrent reads can
  // overlap and overwrite one another, resulting in a lossy buffer.
  final AtomicLong counter = readBufferWriteCount[bufferIndex];
  final long writeCount = counter.get();
  counter.lazySet(writeCount + 1);

  final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK);
  readBuffers[bufferIndex][index].lazySet(node);

  return writeCount;
}