Java Code Examples for java.util.concurrent.atomic.AtomicInteger#addAndGet()

The following examples show how to use java.util.concurrent.atomic.AtomicInteger#addAndGet(). They are extracted from open source projects; where available, the source project, file, and license are noted above each example.
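As a quick refresher before the project examples: addAndGet(delta) atomically adds delta to the current value and returns the updated value (in contrast to getAndAdd(delta), which returns the previous value); a negative delta subtracts. A minimal standalone sketch, not taken from any of the projects below:

import java.util.concurrent.atomic.AtomicInteger;

public class AddAndGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(10);

        // addAndGet returns the value *after* the addition is applied
        int updated = counter.addAndGet(5);   // updated == 15
        int reduced = counter.addAndGet(-3);  // reduced == 12; negative deltas subtract

        System.out.println(updated + " " + reduced + " " + counter.get()); // prints: 15 12 12
    }
}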
Example 1
@Group("g2")
@GroupThreads(2)
@Benchmark
public void queueDrainAtomic2() {
    AtomicInteger w = wip;
    if (w.getAndIncrement() == 0) {
        int missed = 1;

        for (;;) {
            counter++;
            
            missed = w.addAndGet(-missed);
            if (missed == 0) {
                break;
            }
        }
    }
}
 
Example 2
protected void removeChildSlaveRefCounts(VirtualFileSystemInode childInode, Map<String, AtomicInteger> childRefCounts) {
    if (!childRefCounts.isEmpty()) {
        for (Map.Entry<String, AtomicInteger> refEntry : childRefCounts.entrySet()) {
            AtomicInteger currentCount;
            currentCount = _slaveRefCounts.get(refEntry.getKey());
            if (currentCount == null) {
                // Shouldn't happen since we're removing a child, therefore we should have
                // counts for the slaves referenced by the child
                logger.error("Removing child {} from {} child contained a count of {} for slave {} but the slave has no count against this directory", childInode.getPath(), getPath(), refEntry.getValue().intValue(), refEntry.getKey());
                continue;
            }
            currentCount.addAndGet(-refEntry.getValue().intValue());
        }
        if (!isRoot()) {
            getParent().removeChildSlaveRefCounts(childInode, childRefCounts);
        }
    }
    commit();
}
 
Example 3
Source Project: datawave   File: ContentIndexCounters.java    License: Apache License 2.0
/**
 * Increments the counter denoted by counterName by the given value. The counter's value will only be written to the context if it exceeds bufferSize
 * 
 * @param groupName
 *            The name of the counter's group
 * @param counterName
 *            The name of the counter to increment
 * @param value
 *            The amount to increment the counter by
 * @param reporter
 *            The current task's context
 */
public void incrementValue(String groupName, String counterName, int value, StatusReporter reporter) {
    Map<String,AtomicInteger> group = counts.get(groupName);
    if (group == null) {
        group = new HashMap<>();
        counts.put(groupName, group);
    }
    
    if (group.containsKey(counterName)) {
        AtomicInteger val = group.get(counterName);
        
        if (val.get() > bufferSize && reporter != null) {
            reporter.getCounter(groupName, counterName).increment(val.getAndSet(0));
        }
        
        val.addAndGet(value);
    } else {
        // Note: a brand-new counter is seeded with 1 here, regardless of the value passed in.
        group.put(counterName, new AtomicInteger(1));
    }
}
 
Example 4
@Override
public void handle(List<Integer> perBatchList,PartitionThreadEntity partitionThreadEntity,Map<String, ?> paramsMap){
    final AtomicInteger atomicInteger = (AtomicInteger) paramsMap.get("result");
    for (Integer value : perBatchList){

        LOGGER.trace(
                        "{},BatchNumber:[{}],CurrentListSize:[{}],EachSize:[{}],Name:[{}],TotalListCount:[{}]",
                        partitionThreadEntity.toString(),
                        partitionThreadEntity.getBatchNumber(),
                        partitionThreadEntity.getCurrentListSize(),
                        partitionThreadEntity.getEachSize(),
                        partitionThreadEntity.getName(),
                        partitionThreadEntity.getTotalListCount());
        atomicInteger.addAndGet(value);
    }

}
 
Example 5
private void submitInvokePoints() {
    AtomicInteger uploadCounter = new AtomicInteger(0);
    List<DataPoint> points = invokeDataQueue.peek();
    InvocationContext invocationContext = InvocationContextImpl.Factory.currentInstance();
    invocationContext.timeout(5000);
    while (points != null) {
        try {
            if (!points.isEmpty()) {
                LOGGER.debug(Thread.currentThread().getName() + "::uploading submitPoints ");

                CounterClientFactory.COUNTER_CLIENT.submitPoints(points);
                uploadCounter.addAndGet(points.size());
            }
            invokeDataQueue.remove(points);

            points = invokeDataQueue.peek();
        } catch (Throwable e) {
            // error while submitting points
            LOGGER.error(e.getMessage(), e);
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(Thread.currentThread().getName()
                        + " points:" + uploadCounter.get() + " uploaded before error, now  release the lock.");
            }
            return;
        }
    }

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(Thread.currentThread().getName() + " no more points, total points:" + uploadCounter.get()
                + " uploaded, now release the lock.");
    }
    InvocationContextImpl.Factory.removeCurrentInstance();
}
 
Example 6
Source Project: toml4j   File: MultilineStringValueReader.java    License: MIT License
@Override
public Object read(String s, AtomicInteger index, Context context) {
  AtomicInteger line = context.line;
  int startLine = line.get();
  int originalStartIndex = index.get();
  int startIndex = index.addAndGet(3);
  int endIndex = -1;
  
  if (s.charAt(startIndex) == '\n') {
    startIndex = index.incrementAndGet();
    line.incrementAndGet();
  }
  
  for (int i = startIndex; i < s.length(); i = index.incrementAndGet()) {
    char c = s.charAt(i);
    
    if (c == '\n') {
      line.incrementAndGet();
    } else if (c == '"' && s.length() > i + 2 && s.charAt(i + 1) == '"' && s.charAt(i + 2) == '"') {
      endIndex = i;
      index.addAndGet(2);
      break;
    }
  }
  
  if (endIndex == -1) {
    Results.Errors errors = new Results.Errors();
    errors.unterminated(context.identifier.getName(), s.substring(originalStartIndex), startLine);
    return errors;
  }

  s = s.substring(startIndex, endIndex);
  s = s.replaceAll("\\\\\\s+", "");
  s = StringValueReaderWriter.STRING_VALUE_READER_WRITER.replaceUnicodeCharacters(s);
  s = StringValueReaderWriter.STRING_VALUE_READER_WRITER.replaceSpecialCharacters(s);

  return s;
}
 
Example 7
private static List<String> handleNumberedInclude(List<String> lines, AtomicInteger lineNumber) {
    Include ia = Include.fromNumberedInclude(lines, 0, lines.get(lineNumber.getAndIncrement()));

    // Skip over the previously-included text.
    lineNumber.addAndGet(ia.endWithinBlock() - ia.startWithinBlock() + 1);

    return ia.asBracketedAsciiDocInclude();
}
 
Example 8
private List<InsertValueContext> getInsertValueContexts(final List<Object> parameters, final AtomicInteger parametersOffset) {
    List<InsertValueContext> result = new LinkedList<>();
    for (Collection<ExpressionSegment> each : getSqlStatement().getAllValueExpressions()) {
        InsertValueContext insertValueContext = new InsertValueContext(each, parameters, parametersOffset.get());
        result.add(insertValueContext);
        parametersOffset.addAndGet(insertValueContext.getParametersCount());
    }
    return result;
}
 
Example 9
@Test
public void test_stateful_start() {

    for (int i = 0; i < 100; i++) {
        final PUBLISH publish = createPublish(i, QoS.AT_LEAST_ONCE, "topic" + i);
        persistence.add("client" + i, false, publish, 100L, DISCARD, false, i % bucketCount);
    }

    persistence.stop();

    persistence.start();

    final ConcurrentHashMap<Integer, Map<Key, AtomicInteger>> queueSizeBuckets = persistence.getQueueSizeBuckets();

    final AtomicInteger counter = new AtomicInteger();

    for (final Map<Key, AtomicInteger> value : queueSizeBuckets.values()) {
        if (value != null) {
            for (final AtomicInteger count : value.values()) {
                if (count != null) {
                    counter.addAndGet(count.get());
                }
            }
        }
    }

    assertEquals(100, counter.get());
    assertEquals((Long.MAX_VALUE / 2) + 99, ClientQueuePersistenceSerializer.NEXT_PUBLISH_NUMBER.get());

}
 
Example 10
CompletableFuture<Writer> writeAsync() {
  Objects.requireNonNull(asyncExecutor, "asyncExecutor == null");
  final Random r = new Random(seed);
  final int size = fileSize.getSizeInt();

  final CompletableFuture<Writer> returnFuture = new CompletableFuture<>();
  final AtomicInteger callCount = new AtomicInteger();
  final AtomicInteger n = new AtomicInteger();
  for(; n.get() < size; ) {
    final int offset = n.get();
    final int remaining = size - offset;
    final int length = Math.min(remaining, buffer.length);
    final boolean close = length == remaining;

    final ByteBuffer b = randomBytes(length, r);

    callCount.incrementAndGet();
    n.addAndGet(length);

    LOG.trace("writeAsync {}, offset={}, length={}, close? {}",
        fileName, offset, length, close);
    client.writeAsync(fileName, offset, close, b)
        .thenAcceptAsync(written -> Assert.assertEquals(length, (long)written), asyncExecutor)
        .thenRun(() -> {
          final int count = callCount.decrementAndGet();
          LOG.trace("writeAsync {}, offset={}, length={}, close? {}: n={}, callCount={}",
              fileName, offset, length, close, n.get(), count);
          if (n.get() == size && count == 0) {
            returnFuture.complete(this);
          }
        })
        .exceptionally(e -> {
          returnFuture.completeExceptionally(e);
          return null;
        });
  }
  return returnFuture;
}
 
Example 11
Source Project: deeplearning4j   File: NetworkOrganizer.java    License: Apache License 2.0
protected int getNumDivergents() {
    if (nodes.size() == 0)
        return 0;

    AtomicInteger cnt = new AtomicInteger(nodes.size() - 1);
    for (VirtualNode node : nodes.values()) {
        cnt.addAndGet(node.getNumDivergents());
    }
    return cnt.get();
}
 
Example 12
Source Project: SI   File: SeqNumManager.java    License: BSD 2-Clause "Simplified" License
public int get(String name) {
    synchronized (this) {
        AtomicInteger seqNum = sequenceMap.get(name);
        if (seqNum == null) {
            sequenceMap.put(name, new AtomicInteger(0));
            dao.setSequence(name, 0);
            return 0;
        } else {
            int num = seqNum.addAndGet(1);
            dao.setSequence(name, num);
            return num;
        }
    }
}
 
Example 13
@Test
public void testConcurrentSplitAssignmentForSingleHost() {
	try {
		final int NUM_THREADS = 10;
		final int NUM_SPLITS = 500;
		final int SUM_OF_IDS = (NUM_SPLITS-1) * (NUM_SPLITS) / 2;
		
		// load some splits
		Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
		for (int i = 0; i < NUM_SPLITS; i++) {
			splits.add(new LocatableInputSplit(i, "testhost"));
		}
		
		final LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
		
		final AtomicInteger splitsRetrieved = new AtomicInteger(0);
		final AtomicInteger sumOfIds = new AtomicInteger(0);
		
		Runnable retriever = new Runnable() {
			
			@Override
			public void run() {
				LocatableInputSplit split;
				while ((split = ia.getNextInputSplit("testhost", 0)) != null) {
					splitsRetrieved.incrementAndGet();
					sumOfIds.addAndGet(split.getSplitNumber());
				}
			}
		};
		
		// create the threads
		Thread[] threads = new Thread[NUM_THREADS];
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i] = new Thread(retriever);
			threads[i].setDaemon(true);
		}
		
		// launch concurrently
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i].start();
		}
		
		// sync
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i].join(5000);
		}
		
		// verify
		for (int i = 0; i < NUM_THREADS; i++) {
			if (threads[i].isAlive()) {
				fail("The concurrency test case is erroneous, the thread did not respond in time.");
			}
		}
		
		assertEquals(NUM_SPLITS, splitsRetrieved.get());
		assertEquals(SUM_OF_IDS, sumOfIds.get());
		
		// nothing left
		assertNull(ia.getNextInputSplit("testhost", 0));
		
		assertEquals(0, ia.getNumberOfRemoteAssignments());
		assertEquals(NUM_SPLITS, ia.getNumberOfLocalAssignments());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 14
Source Project: tessera   File: StressSendIT.java    License: Apache License 2.0
/** Quorum sends transaction with single public recipient key */
@Test
public void sendToSingleRecipientUntilFailureOrMaxReached() {
    LOGGER.info("stress test starting");

    final Party firstParty = partyHelper.findByAlias("A");
    final Party secondParty = partyHelper.findByAlias("B");
    byte[] transactionData = utils.createTransactionData();

    final AtomicInteger sendCounter = new AtomicInteger(0);
    final AtomicInteger invalidResults = new AtomicInteger(0);

    final List<Thread> stressThreads = new ArrayList<>();
    for (int i = 0; i < THREAD_COUNT; i++) {
        final Thread stressThread =
                new Thread(
                        () -> {
                            int currentCount = sendCounter.incrementAndGet();
                            while (currentCount < MAX_COUNT) {
                                final SendRequest sendRequest = new SendRequest();
                                sendRequest.setFrom(firstParty.getPublicKey());
                                sendRequest.setTo(secondParty.getPublicKey());
                                sendRequest.setPayload(transactionData);

                                try (Response response =
                                        client.target(firstParty.getQ2TUri())
                                                .path(SEND_PATH)
                                                .request()
                                                .post(Entity.entity(sendRequest, MediaType.APPLICATION_JSON))) {

                                    if (response.getStatus() != 201) {
                                        LOGGER.info("Response is not 201. MessageCount=" + currentCount);
                                        sendCounter.addAndGet(MAX_COUNT);
                                        invalidResults.incrementAndGet();
                                    }
                                }

                                currentCount = sendCounter.incrementAndGet();
                                if (currentCount % 1000 == 0) {
                                    LOGGER.info("currentCount={}", currentCount);
                                }
                            }
                        });
        stressThread.start();
        stressThreads.add(stressThread);
    }

    // wait for stress threads to finish
    for (int i = 0; i < THREAD_COUNT; i++) {
        try {
            stressThreads.get(i).join();
        } catch (InterruptedException e) {
            LOGGER.error("Error while waiting for clients to stop.", e);
        }
    }
    LOGGER.info("stress test finished");
    assertThat(invalidResults.get()).isEqualTo(0);
}
 
Example 15
@Test
public void testConcurrentSplitAssignment() {
	try {
		final int NUM_THREADS = 10;
		final int NUM_SPLITS = 500;
		final int SUM_OF_IDS = (NUM_SPLITS-1) * (NUM_SPLITS) / 2;
		
		Set<InputSplit> splits = new HashSet<InputSplit>();
		for (int i = 0; i < NUM_SPLITS; i++) {
			splits.add(new GenericInputSplit(i, NUM_SPLITS));
		}
		
		final DefaultInputSplitAssigner ia = new DefaultInputSplitAssigner(splits);
		
		final AtomicInteger splitsRetrieved = new AtomicInteger(0);
		final AtomicInteger sumOfIds = new AtomicInteger(0);
		
		Runnable retriever = new Runnable() {
			
			@Override
			public void run() {
				String host = "";
				GenericInputSplit split;
				while ((split = (GenericInputSplit) ia.getNextInputSplit(host, 0)) != null) {
					splitsRetrieved.incrementAndGet();
					sumOfIds.addAndGet(split.getSplitNumber());
				}
			}
		};
		
		// create the threads
		Thread[] threads = new Thread[NUM_THREADS];
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i] = new Thread(retriever);
			threads[i].setDaemon(true);
		}
		
		// launch concurrently
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i].start();
		}
		
		// sync
		for (int i = 0; i < NUM_THREADS; i++) {
			threads[i].join(5000);
		}
		
		// verify
		for (int i = 0; i < NUM_THREADS; i++) {
			if (threads[i].isAlive()) {
				fail("The concurrency test case is erroneous, the thread did not respond in time.");
			}
		}
		
		assertEquals(NUM_SPLITS, splitsRetrieved.get());
		assertEquals(SUM_OF_IDS, sumOfIds.get());
		
		// nothing left
		assertNull(ia.getNextInputSplit("", 0));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 16
Source Project: consulo   File: Messages.java    License: Apache License 2.0
private void animate() {
  final int height = getPreferredSize().height;
  final int frameCount = 10;
  final boolean toClose = isShowing();


  final AtomicInteger i = new AtomicInteger(-1);
  final Alarm animator = new Alarm(myDisposable);
  final Runnable runnable = new Runnable() {
    @Override
    public void run() {
      int state = i.addAndGet(1);

      double linearProgress = (double)state / frameCount;
      if (toClose) {
        linearProgress = 1 - linearProgress;
      }
      myLayout.myPhase = (1 - Math.cos(Math.PI * linearProgress)) / 2;
      Window window = getPeer().getWindow();
      Rectangle bounds = window.getBounds();
      bounds.height = (int)(height * myLayout.myPhase);

      window.setBounds(bounds);

      if (state == 0 && !toClose && window.getOwner() != null) {
        consulo.ui.Window uiWindow = TargetAWT.from(window.getOwner());
        IdeFrame ideFrame = uiWindow.getUserData(IdeFrame.KEY);
        if(ideFrame != null) {
          WindowManager.getInstance().requestUserAttention(ideFrame, true);
        }
      }

      if (state < frameCount) {
        animator.addRequest(this, 10);
      }
      else if (toClose) {
        MessageDialog.super.dispose();
      }
    }
  };
  animator.addRequest(runnable, 10, ModalityState.stateForComponent(getRootPane()));
}
 
Example 17
Source Project: dyno   File: VotingHostsFromTokenRange.java    License: Apache License 2.0
@Override
public CircularList<Host> getVotingHosts() {
    if (votingHosts.getSize() == 0) {
        if(effectiveVotingSize % 2 == 0) {
            throw new IllegalStateException("Cannot do voting with even number of nodes for voting");
        }
        List<HostToken> allHostTokens = tokenMapSupplier.getTokens(ImmutableSet.copyOf(hostSupplier.getHosts()));
        if (allHostTokens.size() < MIN_VOTING_SIZE) {
            throw new IllegalStateException(String.format("Cannot perform voting with less than %d nodes", MIN_VOTING_SIZE));
        }
        // Total number of hosts present per rack
        Map<String, Long> numHostsPerRack = allHostTokens.stream().map(ht -> ht.getHost().getRack()).collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
        AtomicInteger numHostsRequired = new AtomicInteger(effectiveVotingSize);
        // Map to keep track of number of hosts to take for voting from this rack
        Map<String, Integer> numHosts = new HashMap<>();
        // Sort racks to get the same order
        List<String> racks = numHostsPerRack.keySet().stream().sorted(Comparator.comparing(String::toString)).collect(Collectors.toList());
        for(String rack: racks) {
            // Take as many hosts as you can from this rack.
            int v = (int) Math.min(numHostsRequired.get(), numHostsPerRack.get(rack));
            numHostsRequired.addAndGet(-v);
            numHosts.put(rack, v);
            calculatedVotingSize.addAndGet(v);
        }
        if(calculatedVotingSize.get() % 2 == 0) {
            throw new IllegalStateException("Could not construct voting pool. Min number of hosts not met!");
        }
        Map<String, List<HostToken>> rackToHostToken = allHostTokens.stream()
                .collect(Collectors.groupingBy(ht -> ht.getHost().getRack()));
        // Get the final list of voting hosts
        List<Host> finalVotingHosts = rackToHostToken.entrySet().stream()
                // Sorting on token to get hosts deterministically.
                .sorted(Comparator.comparing(Map.Entry::getKey))
                .flatMap(e -> {
                    List<HostToken> temp = e.getValue();
                    temp.sort(HostToken::compareTo);
                    return temp.subList(0, numHosts.get(e.getKey())).stream();
                })
                .map(ht -> ht.getHost())
                .collect(Collectors.toList());
        votingHosts.swapWithList(finalVotingHosts);
    }
    return votingHosts;
}
 
Example 18
Source Project: dremio-oss   File: Metadata.java    License: Apache License 2.0
private ParquetFileMetadata getParquetFileMetadata(FileAttributes file, AtomicInteger currentNumSplits, long maxSplits) throws IOException {
  final ParquetMetadata metadata =
    SingletonParquetFooterCache.readFooter(fs, file, ParquetMetadataConverter.NO_FILTER, maxFooterLength);
  final int numSplits = currentNumSplits.addAndGet(metadata.getBlocks().size());
  if (numSplits > maxSplits) {
    throw new TooManySplitsException(
      String.format("Too many splits encountered when processing parquet metadata at file %s, maximum is %d but encountered %d splits thus far.",
        file.getPath(), maxSplits, numSplits));
  }

  final MessageType schema = metadata.getFileMetaData().getSchema();

  Map<SchemaPath, OriginalType> originalTypeMap = Maps.newHashMap();
  schema.getPaths();
  for (String[] path : schema.getPaths()) {
    originalTypeMap.put(SchemaPath.getCompoundPath(path), getOriginalType(schema, path, 0));
  }

  List<RowGroupMetadata> rowGroupMetadataList = Lists.newArrayList();

  ArrayList<SchemaPath> ALL_COLS = new ArrayList<>();
  ALL_COLS.add(AbstractRecordReader.STAR_COLUMN);
  boolean autoCorrectCorruptDates = formatConfig.autoCorrectCorruptDates;
  ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility.detectCorruptDates(metadata, ALL_COLS, autoCorrectCorruptDates);
  if(logger.isDebugEnabled()){
    logger.debug(containsCorruptDates.toString());
  }
  final Map<ColumnTypeMetadata.Key, ColumnTypeMetadata> columnTypeInfo = Maps.newHashMap();
  int rowGroupIdx = 0;
  for (BlockMetaData rowGroup : metadata.getBlocks()) {
    List<ColumnMetadata> columnMetadataList = Lists.newArrayList();
    long length = 0;
    for (ColumnChunkMetaData col : rowGroup.getColumns()) {
      ColumnMetadata columnMetadata;

      // statistics might just have the non-null counts with no min/max they might be
      // initialized to zero instead of null.
      // check statistics actually have non null values (or) column has all nulls.
      boolean statsAvailable = (col.getStatistics() != null && !col.getStatistics().isEmpty()
        && (col.getStatistics().hasNonNullValue()) || col.getStatistics().getNumNulls() ==
        rowGroup.getRowCount());

      Statistics<?> stats = col.getStatistics();
      String[] columnName = col.getPath().toArray();
      SchemaPath columnSchemaName = SchemaPath.getCompoundPath(columnName);
      ColumnTypeMetadata columnTypeMetadata =
          new ColumnTypeMetadata(columnName, col.getType(), originalTypeMap.get(columnSchemaName));

      columnTypeInfo.put(new ColumnTypeMetadata.Key(columnTypeMetadata.name), columnTypeMetadata);
      if (statsAvailable) {
        // Write stats only if minVal==maxVal. Also, we then store only maxVal
        Object mxValue = null;
        if (stats.genericGetMax() != null && stats.genericGetMin() != null &&
            stats.genericGetMax().equals(stats.genericGetMin())) {
          mxValue = stats.genericGetMax();
          if (containsCorruptDates == ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION
              && columnTypeMetadata.originalType == OriginalType.DATE) {
            mxValue = ParquetReaderUtility.autoCorrectCorruptedDate((Integer) mxValue);
          }
        }
        columnMetadata =
            new ColumnMetadata(columnTypeMetadata.name, mxValue, stats.getNumNulls());
      } else {
        // log it under trace to avoid lot of log entries.
        logger.trace("Stats are not available for column {}, rowGroupIdx {}, file {}",
            columnSchemaName, rowGroupIdx, file.getPath());
        columnMetadata = new ColumnMetadata(columnTypeMetadata.name,null, null);
      }
      columnMetadataList.add(columnMetadata);
      length += col.getTotalSize();
    }

    RowGroupMetadata rowGroupMeta =
        new RowGroupMetadata(rowGroup.getStartingPos(), length, rowGroup.getRowCount(),
            getHostAffinity(fs, file, rowGroup.getStartingPos(), length), columnMetadataList);

    rowGroupMetadataList.add(rowGroupMeta);
    rowGroupIdx++;
  }

  return new ParquetFileMetadata(file, file.size(), rowGroupMetadataList, columnTypeInfo);
}
 
Example 19
Source Project: cosmo   File: SimpleUrlContentReader.java    License: Apache License 2.0
/**
 * Gets and validates the content from the specified <code>url</code>.
 * 
 * @param url
 *            <code>URL</code> where to get the content from.
 * @param timeoutInMillis
 *            maximum time to wait before abandoning the connection, in milliseconds
 * @param options
 *            request options, including the headers to be sent when making the request to the specified URL
 * @return content read from the specified <code>url</code>
 */
public Set<NoteItem> getContent(String url, int timeoutInMillis, RequestOptions options) {
    CloseableHttpClient client = null;
    CloseableHttpResponse response = null;
    try {
        URL source = build(url, options);
        HttpGet request = new HttpGet(source.toURI());
        for (Entry<String, String> entry : options.headers().entrySet()) {
            request.addHeader(entry.getKey(), entry.getValue());
        }

        client = buildClient(timeoutInMillis, source);
        response = client.execute(request);

        InputStream contentStream = null;
        ByteArrayOutputStream baos = null;
        try {
            contentStream = response.getEntity().getContent();
            baos = new ByteArrayOutputStream();
            AtomicInteger counter = new AtomicInteger();
            byte[] buffer = new byte[1024];
            int offset = 0;
            long startTime = System.currentTimeMillis();
            while ((offset = contentStream.read(buffer)) != -1) {
                counter.addAndGet(offset);
                if (counter.get() > allowedContentSizeInBytes) {
                    throw new ExternalContentTooLargeException(
                            "Content from url " + url + " is larger then " + this.allowedContentSizeInBytes);
                }
                long now = System.currentTimeMillis();
                if (startTime + timeoutInMillis < now) {
                    throw new IOException("Too much time spent reading url: " + url);
                }
                baos.write(buffer, 0, offset);
            }
            Calendar calendar = new CalendarBuilder().build(new ByteArrayInputStream(baos.toByteArray()));
            this.postProcess(calendar);

            Set<NoteItem> externalItems = converter.asItems(calendar);

            validate(externalItems);

            return externalItems;
        } finally {
            close(contentStream);
            close(baos);
        }
    } catch (IOException | URISyntaxException | ParserException e) {
        throw new ExternalContentInvalidException(e);
    } finally {
        close(response);
        close(client);
    }
}
 
Example 20
Source Project: mug   File: FunctionsTest.java    License: Apache License 2.0
@Test public void testCheckedBiConsumer_andThen() throws Throwable {
  AtomicInteger sum = new AtomicInteger();
  CheckedBiConsumer<Integer, Integer, Throwable> consumer = (a, b) -> sum.addAndGet(a + b);
  consumer.andThen(consumer).accept(1, 2);
  assertThat(sum.get()).isEqualTo(6);
}