Java Code Examples for org.apache.commons.lang3.tuple.ImmutablePair#getLeft()

The following examples show how to use org.apache.commons.lang3.tuple.ImmutablePair#getLeft(). Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
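
As a quick orientation before the project examples, the sketch below shows the API in isolation: ImmutablePair.of(...) builds the pair, and getLeft()/getRight() return its two components (equivalently available as the public final fields left and right). The class name and values are illustrative only and are not taken from any of the projects listed here.

import org.apache.commons.lang3.tuple.ImmutablePair;

public class ImmutablePairGetLeftDemo {
    public static void main(String[] args) {
        // Build an immutable pair; the static factory infers the generic types.
        ImmutablePair<String, Integer> status = ImmutablePair.of("OK", 200);

        // getLeft() returns the first component, getRight() the second.
        String reason = status.getLeft();   // "OK"
        int code = status.getRight();       // 200

        // The components are also exposed as the public final fields 'left' and 'right'.
        System.out.println(reason + " -> " + code + " (left field: " + status.left + ")");
    }
}
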
Example 1
Source File: DummyRepoState.java    From HubTurbo with GNU Lesser General Public License v3.0
protected final Issue setMilestone(int issueId, Optional<Integer> milestone) {
    ImmutablePair<TurboIssue, IssueMetadata> mutables = produceMutables(issueId);
    TurboIssue issueToSet = mutables.getLeft();
    IssueMetadata metadataOfIssue = mutables.getRight();
    List<TurboIssueEvent> eventsOfIssue = metadataOfIssue.getEvents();

    // demilestone the issue, then set issue milestone using the new milestone
    issueToSet.getMilestone()
            .ifPresent(issueMilestone -> removeMilestoneFromIssue(issueMilestone, issueToSet, eventsOfIssue));
    milestone
            .ifPresent(newMilestone -> setMilestoneForIssue(newMilestone, issueToSet, eventsOfIssue));

    issueToSet.setUpdatedAt(LocalDateTime.now());
    // Replace originals with copies, and queue them up to be retrieved
    markUpdatedEvents(issueToSet, IssueMetadata.intermediate(eventsOfIssue, metadataOfIssue.getComments(), "", ""));

    Issue serverIssue = new Issue();
    milestone.ifPresent(newMilestone -> serverIssue.setMilestone(new Milestone().setNumber(newMilestone)));
    return serverIssue;
}
 
Example 2
Source File: GeneralGtpClient.java    From mylizzie with GNU General Public License v3.0
@Override
public synchronized boolean onStdinReady(final ByteBuffer buffer) {
    if (!runningCommandQueue.isEmpty() && runningCommandQueue.stream().anyMatch(pair -> !pair.getLeft().isContinuous())) {
        return false;
    }

    ImmutablePair<GeneralGtpFuture, Consumer<String>> futurePair = stagineCommandQueue.poll();
    if (futurePair != null) {
        GeneralGtpFuture future = futurePair.getLeft();
        buffer.put(future.getCommand().getBytes());
        if (!future.getCommand().endsWith("\n")) {
            buffer.put((byte) '\n');
        }
        buffer.flip();

        runningCommandQueue.offer(futurePair);
        miscProcessor.execute(() -> engineGtpCommandObserverList.forEach(observer -> observer.accept(future.getCommand())));

        // A continuous command does not block the queue; if more commands are staged, report that more stdin data is ready
        if (future.isContinuous() && !stagineCommandQueue.isEmpty()) {
            return true;
        }
    }

    return false;
}
 
Example 3
Source File: DummyRepoState.java    From HubTurbo with GNU Lesser General Public License v3.0
public boolean editIssueState(int issueId, boolean isOpen) {
    ImmutablePair<TurboIssue, IssueMetadata> mutables = produceMutables(issueId);
    TurboIssue toEdit = mutables.getLeft();
    IssueMetadata metadataOfIssue = mutables.getRight();
    List<TurboIssueEvent> eventsOfIssue = metadataOfIssue.getEvents();

    eventsOfIssue.add(new TurboIssueEvent(new User().setLogin("test-nonself"),
                                          isOpen ? IssueEventType.Reopened : IssueEventType.Closed,
                                          new Date()));
    toEdit.setOpen(isOpen);
    toEdit.setUpdatedAt(LocalDateTime.now());

    markUpdatedEvents(toEdit, IssueMetadata.intermediate(eventsOfIssue, metadataOfIssue.getComments(), "", ""));

    return true;
}
 
Example 4
Source File: DummyRepoState.java    From HubTurbo with GNU Lesser General Public License v3.0
protected final Issue setAssignee(int issueId, Optional<String> newAssigneeLoginName) {
    ImmutablePair<TurboIssue, IssueMetadata> mutables = produceMutables(issueId);
    TurboIssue toSet = mutables.getLeft();
    IssueMetadata metadataOfIssue = mutables.getRight();
    List<TurboIssueEvent> eventsOfIssue = metadataOfIssue.getEvents();

    if (toSet.getAssignee().isPresent()) {
        String assigneeOfIssue = toSet.getAssignee().get();
        eventsOfIssue.add(new TurboIssueEvent(new User().setLogin("test-nonself"),
                IssueEventType.Unassigned,
                new Date()).setAssignedUser(new User().setLogin(assigneeOfIssue)));
    }

    eventsOfIssue.add(new TurboIssueEvent(new User().setLogin("test-nonself"),
            IssueEventType.Assigned,
            new Date()).setAssignedUser(new User().setLogin(newAssigneeLoginName.get())));
    toSet.setAssignee(newAssigneeLoginName.get());
    toSet.setUpdatedAt(LocalDateTime.now());

    markUpdatedEvents(toSet, IssueMetadata.intermediate(eventsOfIssue, metadataOfIssue.getComments(), "", ""));

    Issue newIssue = new Issue();
    newIssue.setAssignee(new User().setLogin(newAssigneeLoginName.get()));
    return newIssue;
}
 
Example 5
Source File: DummyRepoState.java    From HubTurbo with GNU Lesser General Public License v3.0
protected TurboIssue commentOnIssue(String author, String commentText, int issueId) {
    // Get copies of issue itself and its metadata
    ImmutablePair<TurboIssue, IssueMetadata> mutables = produceMutables(issueId);
    TurboIssue toComment = mutables.getLeft();
    IssueMetadata metadataOfIssue = mutables.getRight();
    List<Comment> commentsOfIssue = metadataOfIssue.getComments();

    // Mutate the copies
    Comment toAdd = new Comment();
    toAdd.setBody(commentText);
    toAdd.setCreatedAt(new Date());
    toAdd.setUser(new User().setLogin(author));
    commentsOfIssue.add(toAdd);
    toComment.setUpdatedAt(LocalDateTime.now());
    toComment.setCommentCount(toComment.getCommentCount() + 1);

    // Replace originals with copies, and queue them up to be retrieved
    markUpdatedComments(toComment,
                        IssueMetadata.intermediate(metadataOfIssue.getEvents(), commentsOfIssue, "", ""));

    return toComment;
}
 
Example 6
Source File: LibraryFragment.java    From Hentoid with Apache License 2.0
/**
 * Loads the current shelf of books into the paged-mode adapter.
 * NB: A bookshelf is the portion of the collection that is displayed on screen in paged mode;
 * the width of the shelf is determined by the "Quantity per page" setting.
 *
 * @param iLibrary Library to extract the shelf from
 */
private void loadBookshelf(PagedList<Content> iLibrary) {
    if (iLibrary.isEmpty()) {
        itemAdapter.set(Collections.emptyList());
        fastAdapter.notifyDataSetChanged();
    } else {
        ImmutablePair<Integer, Integer> bounds = getShelfBound(pager.getCurrentPageNumber(), iLibrary.size());
        int minIndex = bounds.getLeft();
        int maxIndex = bounds.getRight();

        if (minIndex >= maxIndex) { // We just deleted the last item of the last page => Go back one page
            pager.setCurrentPage(pager.getCurrentPageNumber() - 1);
            loadBookshelf(iLibrary);
            return;
        }

        populateBookshelf(iLibrary, pager.getCurrentPageNumber());
    }
}
 
Example 7
Source File: DeployOvfTemplateService.java    From cs-actions with Apache License 2.0
public void deployOvfTemplate(final HttpInputs httpInputs, final VmInputs vmInputs, final String templatePath,
                              final Map<String, String> ovfNetworkMap, final Map<String, String> ovfPropertyMap)
        throws Exception {
    final ConnectionResources connectionResources = new ConnectionResources(httpInputs, vmInputs);
    try {
        final ImmutablePair<ManagedObjectReference, OvfCreateImportSpecResult> pair = createLeaseSetup(connectionResources, vmInputs, templatePath, ovfNetworkMap, ovfPropertyMap);
        final ManagedObjectReference httpNfcLease = pair.getLeft();
        final OvfCreateImportSpecResult importSpecResult = pair.getRight();

        final HttpNfcLeaseInfo httpNfcLeaseInfo = getHttpNfcLeaseInfoWhenReady(connectionResources, httpNfcLease);
        final List<HttpNfcLeaseDeviceUrl> deviceUrls = httpNfcLeaseInfo.getDeviceUrl();
        final ProgressUpdater progressUpdater = executor.isParallel() ?
                new AsyncProgressUpdater(getDisksTotalNoBytes(importSpecResult), httpNfcLease, connectionResources) :
                new SyncProgressUpdater(getDisksTotalNoBytes(importSpecResult), httpNfcLease, connectionResources);

        executor.execute(progressUpdater);
        transferVmdkFiles(templatePath, importSpecResult, deviceUrls, progressUpdater);
        executor.shutdown();
    } finally {
        if (httpInputs.isCloseSession()) {
            connectionResources.getConnection().disconnect();
            clearConnectionFromContext(httpInputs.getGlobalSessionObject());
        }
    }
}
 
Example 8
Source File: SchemaVersionLifecycleManager.java    From registry with Apache License 2.0
public void executeState(Long schemaVersionId, Byte targetState, byte[] transitionDetails)
        throws SchemaLifecycleException, SchemaNotFoundException {
    ImmutablePair<SchemaVersionLifecycleContext, SchemaVersionLifecycleState> schemaLifeCycleContextAndState =
            createSchemaVersionLifeCycleContextAndState(schemaVersionId);
    SchemaVersionLifecycleContext schemaVersionLifecycleContext = schemaLifeCycleContextAndState.getLeft();
    SchemaVersionLifecycleState currentState = schemaLifeCycleContextAndState.getRight();

    schemaVersionLifecycleContext.setState(currentState);
    schemaVersionLifecycleContext.setDetails(transitionDetails);
    SchemaVersionLifecycleStateTransition transition =
            new SchemaVersionLifecycleStateTransition(currentState.getId(), targetState);
    SchemaVersionLifecycleStateAction action = schemaVersionLifecycleContext.getSchemaLifeCycleStatesMachine()
                                                                            .getTransitions()
                                                                            .get(transition);
    try {
        List<SchemaVersionLifecycleStateTransitionListener> listeners =
                schemaVersionLifecycleContext.getSchemaLifeCycleStatesMachine()
                                             .getListeners()
                                             .getOrDefault(transition, DEFAULT_LISTENERS);

        listeners.stream().forEach(listener -> listener.preStateTransition(schemaVersionLifecycleContext));
        action.execute(schemaVersionLifecycleContext);
        listeners.stream().forEach(listener -> listener.postStateTransition(schemaVersionLifecycleContext));
    } catch (SchemaLifecycleException e) {
        Throwable cause = e.getCause();
        if (cause != null && cause instanceof SchemaNotFoundException) {
            throw (SchemaNotFoundException) cause;
        }
        throw e;
    }
}
 
Example 9
Source File: DummyRepoState.java    From HubTurbo with GNU Lesser General Public License v3.0
protected final List<Label> setLabels(int issueId, List<String> newLabels) throws IOException {
    if (newLabels.contains("exception")) {
        throw new IOException("Exception label was set");
    }
    // Get copies of issue itself and its metadata
    ImmutablePair<TurboIssue, IssueMetadata> mutables = produceMutables(issueId);
    TurboIssue toSet = mutables.getLeft();
    IssueMetadata metadataOfIssue = mutables.getRight();
    List<TurboIssueEvent> eventsOfIssue = metadataOfIssue.getEvents();

    // Mutate the copies
    List<String> labelsOfIssue = toSet.getLabels();
    labelsOfIssue.stream()
            .filter(existingLabel -> !newLabels.contains(existingLabel))
            .forEach(labelName -> eventsOfIssue.add(
                    new TurboIssueEvent(new User().setLogin("test-nonself"), IssueEventType.Unlabeled, new Date())
                            .setLabelName(labelName)
                            .setLabelColour(labels.get(labelName).getColour())));

    newLabels.stream()
            .filter(newLabel -> !labelsOfIssue.contains(newLabel))
            .forEach(newLabel -> eventsOfIssue.add(
                    new TurboIssueEvent(new User().setLogin("test-nonself"), IssueEventType.Labeled, new Date())
                            .setLabelName(newLabel)
                            .setLabelColour(labels.get(newLabel).getColour())));

    toSet.setLabels(newLabels);
    toSet.setUpdatedAt(LocalDateTime.now());

    // Replace originals with copies, and queue them up to be retrieved
    markUpdatedEvents(toSet, IssueMetadata.intermediate(eventsOfIssue, metadataOfIssue.getComments(), "", ""));

    return newLabels.stream().map(name -> new Label().setName(name)).collect(Collectors.toList());
}
 
Example 10
Source File: LibraryFragment.java    From Hentoid with Apache License 2.0
/**
 * Displays the current "bookshelf" (section of the list corresponding to the selected page)
 * A shelf contains as many books as the user has set in Preferences
 * <p>
 * Used in paged mode only
 *
 * @param iLibrary    Library to display books from
 * @param shelfNumber Number of the shelf to display
 */
private void populateBookshelf(@NonNull PagedList<Content> iLibrary, int shelfNumber) {
    if (Preferences.getEndlessScroll()) return;

    ImmutablePair<Integer, Integer> bounds = getShelfBound(shelfNumber, iLibrary.size());
    int minIndex = bounds.getLeft();
    int maxIndex = bounds.getRight();

    List<ContentItem> contentItems = Stream.of(iLibrary.subList(minIndex, maxIndex)).withoutNulls().map(c -> new ContentItem(c, touchHelper, ContentItem.ViewType.LIBRARY)).toList();
    itemAdapter.set(contentItems);
    fastAdapter.notifyDataSetChanged();
}
 
Example 11
Source File: ListResultsComparisonITCase.java    From find with MIT License
@Test
public void testNoOverlap() {
    final ImmutablePair<String, String> terms = findService.getPairOfTermsThatDoNotShareResults();

    assertThat("Found pair of terms that have no overlapping results", terms.getLeft(), not(""));

    findPage.waitForParametricValuesToLoad();
    final Query polar = new Query(terms.getLeft());
    final Query opposites = new Query(terms.getRight());

    searchAndSave(polar, "polar");
    savedSearchService.openNewTab();

    findPage.waitUntilDatabasesLoaded();

    searchAndSave(opposites, "opposites");
    savedSearchService.compareCurrentWith("polar");

    resultsComparison = elementFactory.getResultsComparison();
    Waits.loadOrFadeWait();
    resultsComparison.goToListView();
    final ListView listView = resultsComparison.resultsView(AppearsIn.BOTH);

    assertThat(listView.getResults(), empty());
    assertThat(listView, containsText("No results found"));

    findPage.goBackToSearch();
}
 
Example 12
Source File: PatternTest.java    From WarpPI with Apache License 2.0
private void testMultiplePatterns(
		final Function shouldNotMatch,
		final List<ImmutablePair<Pattern, Function>> patternsAndMatchingFunctions
) {
	for (final ImmutablePair<Pattern, Function> patternAndMatchingFunction : patternsAndMatchingFunctions) {
		final Pattern pattern = patternAndMatchingFunction.getLeft();
		final Function shouldMatch = patternAndMatchingFunction.getRight();

		assertFalse(pattern.match(shouldNotMatch).isPresent());

		final Optional<Map<String, Function>> subFunctions = pattern.match(shouldMatch);
		assertTrue(subFunctions.isPresent());
		assertEquals(shouldMatch, pattern.replace(mathContext, subFunctions.get()));
	}
}
 
Example 13
Source File: RedisRateLimitingDAO.java    From conductor with Apache License 2.0
/**
 * This method evaluates whether the {@link TaskDef} is rate limited based on {@link Task#getRateLimitPerFrequency()}
 * and {@link Task#getRateLimitFrequencyInSeconds()}; if no definition is available, it checks whether the {@link Task}
 * itself is rate limited based on the same two properties.
 *
 * The rate limiting is implemented using the Redis constructs of sorted set and TTL of each element in the rate limited bucket.
 * <ul>
 *     <li>All the entries that are not in the current frequency bucket are cleaned up by leveraging {@link DynoProxy#zremrangeByScore(String, String, String)},
 *     this is done to make the next step of evaluation efficient</li>
 *     <li>A current count (tasks executed within the frequency window) is calculated based on the current time and the beginning of the rate limit frequency time (which is current time - {@link Task#getRateLimitFrequencyInSeconds()} in millis),
 *     this is achieved by using {@link DynoProxy#zcount(String, double, double)} </li>
 *     <li>Once the count is calculated, an evaluation is made to determine whether it is within the bounds of {@link Task#getRateLimitPerFrequency()}; if so, the count is increased and an expiry TTL is added to the entry</li>
 * </ul>
 *
 * @param task the {@link Task} to be evaluated for rate limiting
 * @return true if the {@link Task} is rate limited, false otherwise
 */
@Override
public boolean exceedsRateLimitPerFrequency(Task task, TaskDef taskDef) {
    // If the TaskDefinition is not null, pick the rate limit values from the definition; otherwise pick them from the Task
    ImmutablePair<Integer, Integer> rateLimitPair = Optional.ofNullable(taskDef)
            .map(definition -> new ImmutablePair<>(definition.getRateLimitPerFrequency(), definition.getRateLimitFrequencyInSeconds()))
            .orElse(new ImmutablePair<>(task.getRateLimitPerFrequency(), task.getRateLimitFrequencyInSeconds()));

    int rateLimitPerFrequency = rateLimitPair.getLeft();
    int rateLimitFrequencyInSeconds = rateLimitPair.getRight();
    if (rateLimitPerFrequency <= 0 || rateLimitFrequencyInSeconds <= 0) {
        logger.debug("Rate limit not applied to the Task: {}  either rateLimitPerFrequency: {} or rateLimitFrequencyInSeconds: {} is 0 or less",
                task, rateLimitPerFrequency, rateLimitFrequencyInSeconds);
        return false;
    } else {
        logger.debug("Evaluating rate limiting for TaskId: {} with TaskDefinition of: {} with rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {}",
                task.getTaskId(), task.getTaskDefName(), rateLimitPerFrequency, rateLimitFrequencyInSeconds);
        long currentTimeEpochMillis = System.currentTimeMillis();
        long currentTimeEpochMinusRateLimitBucket = currentTimeEpochMillis - (rateLimitFrequencyInSeconds * 1000);
        String key = nsKey(TASK_RATE_LIMIT_BUCKET, task.getTaskDefName());
        dynoClient.zremrangeByScore(key, "-inf", String.valueOf(currentTimeEpochMinusRateLimitBucket));
        int currentBucketCount = Math.toIntExact(
                dynoClient.zcount(key,
                        currentTimeEpochMinusRateLimitBucket,
                        currentTimeEpochMillis));
        if (currentBucketCount < rateLimitPerFrequency) {
            dynoClient.zadd(key, currentTimeEpochMillis, String.valueOf(currentTimeEpochMillis));
            dynoClient.expire(key, rateLimitFrequencyInSeconds);
            logger.info("TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} within the rate limit with current count {}",
                    task.getTaskId(), task.getTaskDefName(), rateLimitPerFrequency, rateLimitFrequencyInSeconds, ++currentBucketCount);
            Monitors.recordTaskRateLimited(task.getTaskDefName(), rateLimitPerFrequency);
            return false;
        } else {
            logger.info("TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} is out of bounds of rate limit with current count {}",
                    task.getTaskId(), task.getTaskDefName(), rateLimitPerFrequency, rateLimitFrequencyInSeconds, currentBucketCount);
            return true;
        }
    }
}
 
Example 14
Source File: QueryMetricsWriter.java    From azure-cosmosdb-java with MIT License
private void writeSchedulingMetrics(ClientSideMetrics clientSideMetrics) {
    this.writeBeforeSchedulingMetrics();
    List<ImmutablePair<String, SchedulingTimeSpan>> partitionSchedulingTimeSpans = clientSideMetrics.getPartitionSchedulingTimeSpans();
    partitionSchedulingTimeSpans.sort((o1, o2) -> Long.compare(o2.right.getResponseTime(), o1.right.getResponseTime()));
    for (ImmutablePair<String, SchedulingTimeSpan> partitionSchedulingDuration :
            partitionSchedulingTimeSpans) {
        String partitionId = partitionSchedulingDuration.getLeft();
        SchedulingTimeSpan schedulingDuration = partitionSchedulingDuration.getRight();

        this.writePartitionSchedulingDuration(partitionId, schedulingDuration);
    }

    this.writeAfterSchedulingMetrics();
}
 
Example 15
Source File: SessionOptionManager.java    From Bats with Apache License 2.0
private boolean withinRange(final String name) {
  final int queryNumber = session.getQueryCount();
  final ImmutablePair<Integer, Integer> pair = shortLivedOptions.get(name);
  final int start = pair.getLeft();
  final int end = pair.getRight();
  return start <= queryNumber && queryNumber < end;
}
 
Example 16
Source File: OMDirectoryCreateRequest.java    From hadoop-ozone with Apache License 2.0
/**
 * Constructs an OmKeyInfo for every parent directory in the missing list.
 * @param ozoneManager the OzoneManager instance
 * @param keyArgs key arguments of the directory being created
 * @param missingParents list of parent directories to be created
 * @param inheritAcls ACLs to be assigned to each new parent dir
 * @param trxnLogIndex transaction log index used to derive the object ID range
 * @return OmKeyInfo entries for the missing parent directories
 * @throws IOException if the missing parents exceed the object ID range available for this transaction
 */
public static List<OmKeyInfo> getAllParentInfo(OzoneManager ozoneManager,
    KeyArgs keyArgs, List<String> missingParents, List<OzoneAcl> inheritAcls,
    long trxnLogIndex) throws IOException {
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  List<OmKeyInfo> missingParentInfos = new ArrayList<>();

  ImmutablePair<Long, Long> objIdRange = OMFileRequest
      .getObjIdRangeFromTxId(trxnLogIndex);
  long baseObjId = objIdRange.getLeft();
  long maxObjId = objIdRange.getRight();
  long maxLevels = maxObjId - baseObjId;
  long objectCount = 1; // baseObjID is used by the leaf directory

  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  String keyName = keyArgs.getKeyName();

  for (String missingKey : missingParents) {
    long nextObjId = baseObjId + objectCount;
    if (nextObjId > maxObjId) {
      throw new OMException("Too many directories in path. Exceeds limit of "
          + maxLevels + ". Unable to create directory: " + keyName
          + " in volume/bucket: " + volumeName + "/" + bucketName,
          INVALID_KEY_NAME);
    }

    LOG.debug("missing parent {} getting added to KeyTable", missingKey);
    // what about keyArgs for parent directories? TODO
    OmKeyInfo parentKeyInfo = createDirectoryKeyInfoWithACL(
        missingKey, keyArgs, nextObjId, inheritAcls, trxnLogIndex);
    objectCount++;

    missingParentInfos.add(parentKeyInfo);
    omMetadataManager.getKeyTable().addCacheEntry(
        new CacheKey<>(omMetadataManager.getOzoneKey(volumeName,
            bucketName, parentKeyInfo.getKeyName())),
        new CacheValue<>(Optional.of(parentKeyInfo),
            trxnLogIndex));
  }

  return missingParentInfos;
}
 
Example 17
Source File: OperatorWrapper.java    From dremio-oss with Apache License 2.0
public void addSummary(TableBuilder tb) {
  try {
    String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
    tb.appendCell(path);
    tb.appendCell(operatorName);

    double setupSum = 0.0;
    double processSum = 0.0;
    double waitSum = 0.0;
    double memSum = 0.0;
    for (ImmutablePair<OperatorProfile, Integer> ip : ops) {
      OperatorProfile profile = ip.getLeft();
      setupSum += profile.getSetupNanos();
      processSum += profile.getProcessNanos();
      waitSum += profile.getWaitNanos();
      memSum += profile.getPeakLocalMemoryAllocated();
    }

    final ImmutablePair<OperatorProfile, Integer> shortSetup = Collections.min(ops, Comparators.setupTime);
    final ImmutablePair<OperatorProfile, Integer> longSetup = Collections.max(ops, Comparators.setupTime);
    tb.appendNanos(shortSetup.getLeft().getSetupNanos());
    tb.appendNanos(Math.round(setupSum / size));
    tb.appendNanos(longSetup.getLeft().getSetupNanos());

    final ImmutablePair<OperatorProfile, Integer> shortProcess = Collections.min(ops, Comparators.processTime);
    final ImmutablePair<OperatorProfile, Integer> longProcess = Collections.max(ops, Comparators.processTime);
    tb.appendNanos(shortProcess.getLeft().getProcessNanos());
    tb.appendNanos(Math.round(processSum / size));
    tb.appendNanos(longProcess.getLeft().getProcessNanos());

    final ImmutablePair<OperatorProfile, Integer> shortWait = Collections.min(ops, Comparators.waitTime);
    final ImmutablePair<OperatorProfile, Integer> longWait = Collections.max(ops, Comparators.waitTime);
    tb.appendNanos(shortWait.getLeft().getWaitNanos());
    tb.appendNanos(Math.round(waitSum / size));
    tb.appendNanos(longWait.getLeft().getWaitNanos());

    final ImmutablePair<OperatorProfile, Integer> peakMem = Collections.max(ops, Comparators.operatorPeakMemory);
    tb.appendBytes(Math.round(memSum / size));
    tb.appendBytes(peakMem.getLeft().getPeakLocalMemoryAllocated());
  } catch (IOException e) {
    logger.debug("Failed to add summary", e);
  }
}
 
Example 18
Source File: GetPileupSummariesIntegrationTest.java    From gatk with BSD 3-Clause "New" or "Revised" License
@Test
public void test() {
    final File output = createTempFile("output", ".table");

    final ArgumentsBuilder args = new ArgumentsBuilder()
            .addInput(NA12878)
            .addVCF(new File(thousandGenomes))
            .addIntervals(new File(thousandGenomes))
            .addOutput(output)
            .add(GetPileupSummaries.MAX_SITE_AF_SHORT_NAME, 0.9);

    runCommandLine(args);

    final ImmutablePair<String, List<PileupSummary>> sampleAndResult = PileupSummary.readFromFile(output);

    final List<PileupSummary> result = sampleAndResult.getRight();
    final String sample = sampleAndResult.getLeft();
    Assert.assertEquals(sample, "NA12878");

    // compare to IGV manual inspection
    final PileupSummary ps1 = result.get(0);
    Assert.assertEquals(ps1.getContig(), "20");
    Assert.assertEquals(ps1.getStart(), 10000117);
    Assert.assertEquals(ps1.getRefCount(), 35);
    Assert.assertEquals(ps1.getAltCount(), 28);
    Assert.assertEquals(ps1.getOtherAltCount(), 0);
    Assert.assertEquals(ps1.getAlleleFrequency(), 0.605);

    final PileupSummary ps2 = result.get(1);
    Assert.assertEquals(ps2.getStart(), 10000211);
    Assert.assertEquals(ps2.getRefCount(), 27);
    Assert.assertEquals(ps2.getAltCount(), 28);
    Assert.assertEquals(ps2.getAlleleFrequency(), 0.603);

    final PileupSummary ps3 = result.get(2);
    Assert.assertEquals(ps3.getStart(), 10000439);
    Assert.assertEquals(ps3.getRefCount(), 0);
    Assert.assertEquals(ps3.getAltCount(), 76);
    Assert.assertEquals(ps3.getAlleleFrequency(), 0.81);

    final PileupSummary ps4 = result.get(8);
    Assert.assertEquals(ps4.getStart(), 10001298);
    Assert.assertEquals(ps4.getRefCount(), 0);
    Assert.assertEquals(ps4.getAltCount(), 53);
    Assert.assertEquals(ps4.getOtherAltCount(), 0);
    Assert.assertEquals(ps4.getAlleleFrequency(), 0.809);

}
 
Example 19
Source File: ZenScoreEstimator.java    From mylizzie with GNU General Public License v3.0
@Override
public String estimateScoreRaw() {
    ImmutablePair<String, Double> score = estimateScore();
    return score.getLeft() + "+" + score.getRight();
}
 
Example 20
Source File: PrivateKeyStorage.java    From steem-java-api-wrapper with GNU General Public License v3.0
/**
 * Internal method to convert a WIF private key into an ECKey object.
 * 
 * @param wifPrivateKey
 *            The key pair to convert.
 * @return The converted key pair.
 */
private ImmutablePair<PrivateKeyType, ECKey> convertWifToECKeyPair(
        ImmutablePair<PrivateKeyType, String> wifPrivateKey) {
    return new ImmutablePair<>(wifPrivateKey.getLeft(),
            DumpedPrivateKey.fromBase58(null, wifPrivateKey.getRight(), new Sha256ChecksumProvider()).getKey());
}