Java Code Examples for org.apache.commons.lang3.tuple.Pair#getLeft()

The following examples show how to use org.apache.commons.lang3.tuple.Pair#getLeft(). Each example is drawn from an open-source project; the source file and license are noted above it.
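For orientation, here is a minimal sketch of the Pair API. It is an illustration only: the demo class name and values are invented, and the one assumption is that commons-lang3 is on the classpath.

import org.apache.commons.lang3.tuple.Pair;

public class PairGetLeftDemo {
    public static void main(String[] args) {
        // Pair.of(...) builds an immutable pair; getLeft()/getRight() read its two halves.
        Pair<String, Integer> pair = Pair.of("answer", 42);
        System.out.println(pair.getLeft());  // prints: answer
        System.out.println(pair.getRight()); // prints: 42
    }
}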
Example 1
Source File: SustainCalculator.java    From TagRec with GNU Affero General Public License v3.0
private double calculateResourceActivations(int userId, int resource, double beta, double r){

	Set<Integer> topics = this.resTopicTrainList.get(resource).keySet();
	
	// Vector, write 1 for every existing topic
	GVector currentResource = new GVector(this.numberOfTopics);
	currentResource.zero();
	for (Integer t : topics)
		currentResource.setElement(t, 1);
	
	double maxActivation = 0.0;
	double totalActivation = 0.0;
	
	for (GVector c : this.userClusterList.get(userId)){
		Pair<Double, GVector> activationPair = this.calculateActivation(currentResource, c, this.userLambdaList.get(userId), r);
		if (activationPair.getLeft() > maxActivation) {
			maxActivation = activationPair.getLeft();
		}
		totalActivation += activationPair.getLeft();
	}
	
	maxActivation = Math.pow(maxActivation, beta) / Math.pow(totalActivation, beta) * maxActivation;
	return maxActivation;
}
 
Example 2
Source File: SdxServiceTest.java    From cloudbreak with Apache License 2.0
@Test
void testCreateNOTInternalSdxClusterFromLightDutyTemplateWhenBaseLocationSpecifiedShouldCreateStackRequestWithSettedUpBaseLocation()
        throws IOException, TransactionExecutionException {
    when(transactionService.required(isA(Supplier.class))).thenAnswer(invocation -> invocation.getArgument(0, Supplier.class).get());
    String lightDutyJson = FileReaderUtils.readFileFromClasspath("/runtime/7.1.0/aws/light_duty.json");
    when(cdpConfigService.getConfigForKey(any())).thenReturn(JsonUtil.readValue(lightDutyJson, StackV4Request.class));
    //doNothing().when(cloudStorageLocationValidator.validate("s3a://some/dir", ));
    SdxClusterRequest sdxClusterRequest = new SdxClusterRequest();
    sdxClusterRequest.setClusterShape(LIGHT_DUTY);
    sdxClusterRequest.setEnvironment("envir");
    SdxCloudStorageRequest cloudStorage = new SdxCloudStorageRequest();
    cloudStorage.setFileSystemType(FileSystemType.S3);
    cloudStorage.setBaseLocation("s3a://some/dir");
    cloudStorage.setS3(new S3CloudStorageV1Parameters());
    sdxClusterRequest.setCloudStorage(cloudStorage);
    long id = 10L;
    when(sdxClusterRepository.save(any(SdxCluster.class))).thenAnswer(invocation -> {
        SdxCluster sdxWithId = invocation.getArgument(0, SdxCluster.class);
        sdxWithId.setId(id);
        return sdxWithId;
    });
    mockEnvironmentCall(sdxClusterRequest, CloudPlatform.AWS);
    Pair<SdxCluster, FlowIdentifier> result = underTest.createSdx(USER_CRN, CLUSTER_NAME, sdxClusterRequest, null);
    SdxCluster createdSdxCluster = result.getLeft();
    assertEquals("s3a://some/dir", createdSdxCluster.getCloudStorageBaseLocation());
}
 
Example 3
Source File: OnlineOfflineStateModelFactory.java    From terrapin with Apache License 2.0
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example 4
Source File: SendMessageCache.java    From AvatarMQ with Apache License 2.0
public void parallelDispatch(LinkedList<MessageDispatchTask> list) {
    List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    int startPosition = 0;
    Pair<Integer, Integer> pair = calculateBlocks(list.size(), list.size());
    int numberOfThreads = pair.getRight();
    int blocks = pair.getLeft();

    for (int i = 0; i < numberOfThreads; i++) {
        MessageDispatchTask[] task = new MessageDispatchTask[blocks];
        phaser.register();
        System.arraycopy(list.toArray(), startPosition, task, 0, blocks);
        tasks.add(new SendMessageTask(phaser, task));
        startPosition += blocks;
    }

    ExecutorService executor = Executors.newFixedThreadPool(numberOfThreads);
    for (Callable<Void> element : tasks) {
        executor.submit(element);
    }
}
 
Example 5
Source File: EventManagerIntegrationTest.java    From alf.io with GNU General Public License v3.0
@Test
public void testValidationBoundedFailedPendingTickets() {
    List<TicketCategoryModification> categories = Collections.singletonList(
        new TicketCategoryModification(null, "default", 10,
            new DateTimeModification(LocalDate.now(), LocalTime.now()),
            new DateTimeModification(LocalDate.now(), LocalTime.now()),
            DESCRIPTION, BigDecimal.TEN, false, "", true, null, null, null, null, null, 0, null, null, AlfioMetadata.empty()));
    Pair<Event, String> pair = initEvent(categories, organizationRepository, userManager, eventManager, eventRepository);
    Event event = pair.getLeft();
    String username = pair.getRight();

    TicketCategory category = ticketCategoryRepository.findAllTicketCategories(event.getId()).get(0);
    Map<String, String> categoryDescription = ticketCategoryDescriptionRepository.descriptionForTicketCategory(category.getId());

    List<Integer> tickets = ticketRepository.selectTicketInCategoryForUpdate(event.getId(), category.getId(), 1, Collections.singletonList(Ticket.TicketStatus.FREE.name()));
    String reservationId = "12345678";
    ticketReservationRepository.createNewReservation(reservationId, ZonedDateTime.now(), DateUtils.addDays(new Date(), 1), null, "en", event.getId(), event.getVat(), event.isVatIncluded(), event.getCurrency());
    ticketRepository.reserveTickets(reservationId, tickets, category.getId(), "en", 100, "CHF");
    TicketCategoryModification tcm = new TicketCategoryModification(category.getId(), category.getName(), 10,
        DateTimeModification.fromZonedDateTime(category.getUtcInception()),
        DateTimeModification.fromZonedDateTime(category.getUtcExpiration()),
        categoryDescription, category.getPrice(), false, "", false, null, null, null, null, null, 0, null, null, AlfioMetadata.empty());
    Result<TicketCategory> result = eventManager.updateCategory(category.getId(), event, tcm, username);
    assertFalse(result.isSuccess());
}
 
Example 6
Source File: GpadGpiObjectsBuilder.java    From owltools with BSD 3-Clause "New" or "Revised" License
private boolean addEvidenceCode(String eco, GeneAnnotation ga, SimpleEcoMapper mapper) {
	Pair<String,String> pair = mapper.getGoCode(eco);
	boolean added = false;
	if (pair != null) {
		String goCode = pair.getLeft();
		if (goCode != null) {
			ga.setEvidence(goCode, eco);
			added = true;
		}
	}
	if (added == false) {
		boolean fatal = gpadIncludeUnmappedECO == false;
		reportEvidenceIssue(eco, "No corresponding GO evidence code found", fatal);
		if (fatal) {
			return false;
		}
		// fallback always add the ECO class at least
		ga.setEvidence(null, eco);
	}
	return true;
}
 
Example 7
Source File: CalculateContamination.java    From gatk with BSD 3-Clause "New" or "Revised" License
@Override
public Object doWork() {
    final Pair<String, List<PileupSummary>> sampleAndsites = PileupSummary.readFromFile(inputPileupSummariesTable);
    final String sample = sampleAndsites.getLeft();
    final List<PileupSummary> sites = filterSitesByCoverage(sampleAndsites.getRight());

    // use the matched normal to genotype (i.e. find hom alt sites) if available
    final List<PileupSummary> genotypingSites = matchedPileupSummariesTable == null ? sites :
            filterSitesByCoverage(PileupSummary.readFromFile(matchedPileupSummariesTable).getRight());

    final ContaminationModel genotypingModel = new ContaminationModel(genotypingSites);

    if (outputTumorSegmentation != null) {
        final ContaminationModel tumorModel = matchedPileupSummariesTable == null ? genotypingModel : new ContaminationModel(sites);
        MinorAlleleFractionRecord.writeToFile(sample, tumorModel.segmentationRecords(), outputTumorSegmentation);
    }

    final Pair<Double, Double> contaminationAndError = genotypingModel.calculateContaminationFromHoms(sites);
    ContaminationRecord.writeToFile(Arrays.asList(
            new ContaminationRecord(sample, contaminationAndError.getLeft(), contaminationAndError.getRight())), outputTable);

    return "SUCCESS";
}
 
Example 8
Source File: AreaSelection.java    From litematica with GNU Lesser General Public License v3.0
protected void updateCalculatedOrigin()
{
    Pair<BlockPos, BlockPos> pair = PositionUtils.getEnclosingAreaCorners(this.subRegionBoxes.values());

    if (pair != null)
    {
        this.calculatedOrigin = pair.getLeft();
    }
    else
    {
        this.calculatedOrigin = BlockPos.ORIGIN;
    }

    this.calculatedOriginDirty = false;
}
 
Example 9
Source File: JdbcTableReadContextLoader.java    From datacollector with Apache License 2.0
@Override
public TableReadContext load(TableRuntimeContext tableRuntimeContext) throws StageException, SQLException {
  Pair<String, List<Pair<Integer, String>>> queryAndParamValToSet;
  final boolean nonIncremental = tableRuntimeContext.isUsingNonIncrementalLoad();
  if (nonIncremental) {
    final String baseTableQuery = OffsetQueryUtil.buildBaseTableQuery(tableRuntimeContext, quoteChar);
    queryAndParamValToSet = Pair.of(baseTableQuery, Collections.emptyList());
  } else {
    queryAndParamValToSet = OffsetQueryUtil.buildAndReturnQueryAndParamValToSet(
        tableRuntimeContext,
        offsets.get(tableRuntimeContext.getOffsetKey()),
        quoteChar,
        tableJdbcELEvalContext
    );
  }

  if (isReconnect) {
    LOGGER.debug("close the connection");
    connectionManager.closeConnection();
  }

  return new TableReadContext(
      connectionManager.getVendor(),
      connectionManager.getConnection(),
      queryAndParamValToSet.getLeft(),
      queryAndParamValToSet.getRight(),
      fetchSize,
      nonIncremental
  );
}
 
Example 10
Source File: TicketReservationManagerUnitTest.java    From alf.io with GNU General Public License v3.0
@Test
public void calcReservationCostWithASVatNotIncludedASNone() {
    initReservationWithAdditionalServices(false, AdditionalService.VatType.NONE, 10, 10);
    //fourth: event price vat not included, additional service VAT n/a
    Pair<TotalPrice, Optional<PromoCodeDiscount>> priceAndDiscount = manager.totalReservationCostWithVAT(TICKET_RESERVATION_ID);
    TotalPrice fourth = priceAndDiscount.getLeft();
    assertTrue(priceAndDiscount.getRight().isEmpty());
    assertEquals(21, fourth.getPriceWithVAT());
    assertEquals(1, fourth.getVAT());
}
 
Example 11
Source File: EntryCacheImpl.java    From pulsar with Apache License 2.0
@Override
public Pair<Integer, Long> evictEntries(long sizeToFree) {
    checkArgument(sizeToFree > 0);
    Pair<Integer, Long> evicted = entries.evictLeastAccessedEntries(sizeToFree);
    int evictedEntries = evicted.getLeft();
    long evictedSize = evicted.getRight();
    if (log.isDebugEnabled()) {
        log.debug(
                "[{}] Doing cache eviction of at least {} Mb -- Deleted {} entries - Total size deleted: {} Mb "
                        + " -- Current Size: {} Mb",
                ml.getName(), sizeToFree / MB, evictedEntries, evictedSize / MB, entries.getSize() / MB);
    }
    manager.entriesRemoved(evictedSize);
    return evicted;
}
 
Example 12
Source File: ManagedCursorImpl.java    From pulsar with Apache License 2.0
void initializeCursorPosition(Pair<PositionImpl, Long> lastPositionCounter) {
    readPosition = ledger.getNextValidPosition(lastPositionCounter.getLeft());
    markDeletePosition = lastPositionCounter.getLeft();

    // Initialize the counter such that the difference between the messages written on the ML and the
    // messagesConsumed is 0, to ensure the initial backlog count is 0.
    messagesConsumedCounter = lastPositionCounter.getRight();
}
 
Example 13
Source File: PubchemSynonymFinder.java    From act with GNU General Public License v3.0
public static void main(String[] args) throws Exception {
  org.apache.commons.cli.Options opts = new org.apache.commons.cli.Options();
  for (Option.Builder b : OPTION_BUILDERS) {
    opts.addOption(b.build());
  }

  CommandLine cl = null;
  try {
    CommandLineParser parser = new DefaultParser();
    cl = parser.parse(opts, args);
  } catch (ParseException e) {
    System.err.format("Argument parsing failed: %s\n", e.getMessage());
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  if (cl.hasOption("help")) {
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    return;
  }

  File rocksDBFile = new File(cl.getOptionValue(OPTION_INDEX_PATH));
  if (!rocksDBFile.isDirectory()) {
    System.err.format("Index directory does not exist or is not a directory at '%s'", rocksDBFile.getAbsolutePath());
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  List<String> compoundIds = null;
  if (cl.hasOption(OPTION_PUBCHEM_COMPOUND_ID)) {
    compoundIds = Collections.singletonList(cl.getOptionValue(OPTION_PUBCHEM_COMPOUND_ID));
  } else if (cl.hasOption(OPTION_IDS_FILE)) {
    File idsFile = new File(cl.getOptionValue(OPTION_IDS_FILE));
    if (!idsFile.exists()) {
      System.err.format("Cannot find Pubchem CIDs file at %s", idsFile.getAbsolutePath());
      HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
      System.exit(1);
    }

    compoundIds = getCIDsFromFile(idsFile);

    if (compoundIds.size() == 0) {
      System.err.format("Found zero Pubchem CIDs to process in file at '%s', exiting", idsFile.getAbsolutePath());
      HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
      System.exit(1);
    }
  } else {
    System.err.format("Must specify one of '%s' or '%s'; index is too big to print all synonyms.",
        OPTION_PUBCHEM_COMPOUND_ID, OPTION_IDS_FILE);
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  // Run a quick check to warn users of malformed ids.
  compoundIds.forEach(x -> {
    if (!PC_CID_PATTERN.matcher(x).matches()) { // Use matches() for complete matching.
      LOGGER.warn("Specified compound id does not match expected format: %s", x);
    }
  });

  LOGGER.info("Opening DB and searching for %d Pubchem CIDs", compoundIds.size());
  Pair<RocksDB, Map<PubchemTTLMerger.COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles = null;
  Map<String, PubchemSynonyms> results = new LinkedHashMap<>(compoundIds.size());
  try {
    dbAndHandles = PubchemTTLMerger.openExistingRocksDB(rocksDBFile);
    RocksDB db = dbAndHandles.getLeft();
    ColumnFamilyHandle cidToSynonymsCfh =
        dbAndHandles.getRight().get(PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_SYNONYMS);

    for (String cid : compoundIds) {
      PubchemSynonyms synonyms = null;
      byte[] val = db.get(cidToSynonymsCfh, cid.getBytes(UTF8));
      if (val != null) {
        ObjectInputStream oi = new ObjectInputStream(new ByteArrayInputStream(val));
        // We're relying on our use of a one-value-type per index model here so we can skip the instanceof check.
        synonyms = (PubchemSynonyms) oi.readObject();
      } else {
        LOGGER.warn("No synonyms available for compound id '%s'", cid);
      }
      results.put(cid, synonyms);
    }
  } finally {
    if (dbAndHandles != null) {
      dbAndHandles.getLeft().close();
    }
  }

  try (OutputStream outputStream =
           cl.hasOption(OPTION_OUTPUT) ? new FileOutputStream(cl.getOptionValue(OPTION_OUTPUT)) : System.out) {
    OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValue(outputStream, results);
    new OutputStreamWriter(outputStream).append('\n');
  }
  LOGGER.info("Done searching for Pubchem synonyms");
}
 
Example 14
Source File: CustomSwapper.java    From ExternalPlugins with GNU General Public License v3.0
private void executePair(Pair<Tab, Rectangle> pair)
{
	Tab tab = pair.getLeft();
	InterfaceTab target;
	switch (tab)
	{
		case COMBAT:
			target = InterfaceTab.COMBAT;
			break;
		case EQUIPMENT:
			target = InterfaceTab.EQUIPMENT;
			break;
		case INVENTORY:
			target = InterfaceTab.INVENTORY;
			break;
		case PRAYER:
			target = InterfaceTab.PRAYER;
			break;
		case SPELLBOOK:
			target = InterfaceTab.SPELLBOOK;
			break;
		default:
			return;
	}

	// Switch to the required interface tab first (via its hotkey), then click the target rectangle.
	if (client.getVar(VarClientInt.INTERFACE_TAB) != target.getId())
	{
		robot.delay((int) getMillis());
		robot.keyPress(utils.getTabHotkey(tab));
		robot.delay((int) getMillis());
	}
	utils.click(pair.getRight());
}
 
Example 15
Source File: LibFederatedAgg.java    From systemds with Apache License 2.0
public static MatrixBlock aggregateUnaryMatrix(MatrixObject federatedMatrix, AggregateUnaryOperator operator) {
	// find out the characteristics after aggregation
	MatrixCharacteristics mc = new MatrixCharacteristics();
	operator.indexFn.computeDimension(federatedMatrix.getDataCharacteristics(), mc);
	// make outBlock right size
	MatrixBlock ret = new MatrixBlock((int) mc.getRows(), (int) mc.getCols(), operator.aggOp.initialValue);
	List<Pair<FederatedRange, Future<FederatedResponse>>> idResponsePairs = new ArrayList<>();
	// distribute aggregation operation to all workers
	for (Map.Entry<FederatedRange, FederatedData> entry : federatedMatrix.getFedMapping().entrySet()) {
		FederatedData fedData = entry.getValue();
		if (!fedData.isInitialized())
			throw new DMLRuntimeException("Not all FederatedData was initialized for federated matrix");
		Future<FederatedResponse> future = fedData.executeFederatedOperation(
			new FederatedRequest(FederatedRequest.FedMethod.AGGREGATE, operator), true);
		idResponsePairs.add(new ImmutablePair<>(entry.getKey(), future));
	}
	try {
		//TODO replace with block operations
		for (Pair<FederatedRange, Future<FederatedResponse>> idResponsePair : idResponsePairs) {
			FederatedRange range = idResponsePair.getLeft();
			FederatedResponse federatedResponse = idResponsePair.getRight().get();
			int[] beginDims = range.getBeginDimsInt();
			MatrixBlock mb = (MatrixBlock) federatedResponse.getData()[0];
			// TODO performance optimizations
			MatrixValue.CellIndex cellIndex = new MatrixValue.CellIndex(0, 0);
			ValueFunction valueFn = operator.aggOp.increOp.fn;
			// Add worker response to resultBlock
			for (int r = 0; r < mb.getNumRows(); r++)
				for (int c = 0; c < mb.getNumColumns(); c++) {
					// Get the output index where the result should be placed by the index function
					// -> map input row/col to output row/col
					cellIndex.set(r + beginDims[0], c + beginDims[1]);
					operator.indexFn.execute(cellIndex, cellIndex);
					int resultRow = cellIndex.row;
					int resultCol = cellIndex.column;
					double newValue;
					if (valueFn instanceof KahanFunction) {
						// TODO iterate along correct axis to use correction correctly
						// temporary solution to execute correct overloaded method
						KahanObject kobj = new KahanObject(ret.quickGetValue(resultRow, resultCol), 0);
						newValue = ((KahanObject) valueFn.execute(kobj, mb.quickGetValue(r, c)))._sum;
					}
					else {
						// TODO special handling for `ValueFunction`s which do not implement `.execute(double, double)`
						// "Add" two partial calculations together with ValueFunction
						newValue = valueFn.execute(ret.quickGetValue(resultRow, resultCol), mb.quickGetValue(r, c));
					}
					ret.quickSetValue(resultRow, resultCol, newValue);
				}
		}
	}
	catch (Exception e) {
		throw new DMLRuntimeException("Federated binary aggregation failed", e);
	}
	return ret;
}
 
Example 16
Source File: UrlTrie.java    From incubator-gobblin with Apache License 2.0
/**
 * The prefix differs from the root page in that the root page has one extra character at the end; this last character is used to construct the root node of the trie.
 */
public UrlTrie(String rootPage, UrlTrieNode root) {
  Pair<String, UrlTrieNode> defaults = getPrefixAndDefaultRoot(rootPage);
  _prefix = defaults.getLeft();
  _root = root;
}
 
Example 17
Source File: PathsHelper.java    From azure-cosmosdb-java with MIT License
public static boolean validateDocumentId(String resourceIdString) {
    Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
    return pair.getLeft() && pair.getRight().getDocument() != 0;
}
 
Example 18
Source File: ForkJoinDynamicTaskMapper.java    From conductor with Apache License 2.0
/**
 * This method gets the list of tasks that need to be scheduled when the task to be scheduled is of type {@link TaskType#FORK_JOIN_DYNAMIC}.
 * Creates a Fork Task, followed by the Dynamic tasks and a final JOIN task.
 * <p>The definitions of the dynamic forks that need to be scheduled are available in the {@link WorkflowTask#getInputParameters()}
 * which are accessed using the {@link TaskMapperContext#getTaskToSchedule()}. The dynamic fork task definitions are referenced by a key value, supplied by either
 * {@link WorkflowTask#getDynamicForkTasksParam()} or {@link WorkflowTask#getDynamicForkJoinTasksParam()}
 * </p>
 * When creating the list of tasks to be scheduled, a set of preconditions is validated:
 * <ul>
 * <li>If the input parameter representing the Dynamic fork tasks is available as part of {@link WorkflowTask#getDynamicForkTasksParam()} then
 * the input for the dynamic task is validated to be a map by using {@link WorkflowTask#getDynamicForkTasksInputParamName()}</li>
 * <li>If the input parameter representing the Dynamic fork tasks is available as part of {@link WorkflowTask#getDynamicForkJoinTasksParam()} then
 * the input for the dynamic tasks is available in the payload of the tasks definition.
 * </li>
 * <li>A check is performed that the next following task in the {@link WorkflowDef} is a {@link TaskType#JOIN}</li>
 * </ul>
 *
 * @param taskMapperContext A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId
 * @throws TerminateWorkflowException In case of:
 *                                    <ul>
 *                                    <li>
 *                                    When the task after {@link TaskType#FORK_JOIN_DYNAMIC} is not a {@link TaskType#JOIN}
 *                                    </li>
 *                                    <li>
 *                                    When the input parameters for the dynamic tasks are not of type {@link Map}
 *                                    </li>
 *                                    </ul>
 * @return List of tasks in the following order:
 * <ul>
 * <li>
 * {@link SystemTaskType#FORK} with {@link Task.Status#COMPLETED}
 * </li>
 * <li>
 * Might be any kind of task, but in most cases is a UserDefinedTask with {@link Task.Status#SCHEDULED}
 * </li>
 * <li>
 * {@link SystemTaskType#JOIN} with {@link Task.Status#IN_PROGRESS}
 * </li>
 * </ul>
 */
@Override
public List<Task> getMappedTasks(TaskMapperContext taskMapperContext) throws TerminateWorkflowException {
    logger.debug("TaskMapperContext {} in ForkJoinDynamicTaskMapper", taskMapperContext);

    WorkflowTask taskToSchedule = taskMapperContext.getTaskToSchedule();
    Workflow workflowInstance = taskMapperContext.getWorkflowInstance();
    String taskId = taskMapperContext.getTaskId();
    int retryCount = taskMapperContext.getRetryCount();

    List<Task> mappedTasks = new LinkedList<>();
    //Get the list of dynamic tasks and the input for the tasks
    Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> workflowTasksAndInputPair =
            Optional.ofNullable(taskToSchedule.getDynamicForkTasksParam())
                    .map(dynamicForkTaskParam -> getDynamicForkTasksAndInput(taskToSchedule, workflowInstance, dynamicForkTaskParam))
                    .orElseGet(() -> getDynamicForkJoinTasksAndInput(taskToSchedule, workflowInstance));

    List<WorkflowTask> dynForkTasks = workflowTasksAndInputPair.getLeft();
    Map<String, Map<String, Object>> tasksInput = workflowTasksAndInputPair.getRight();

    // Create Fork Task which needs to be followed by the dynamic tasks
    Task forkDynamicTask = createDynamicForkTask(taskToSchedule, workflowInstance, taskId, dynForkTasks);

    mappedTasks.add(forkDynamicTask);

    List<String> joinOnTaskRefs = new LinkedList<>();
    //Add each dynamic task to the mapped tasks and also get the last dynamic task in the list,
    // which indicates that the following task after that needs to be a join task
    for (WorkflowTask wft : dynForkTasks) {//TODO this is a cyclic dependency, break it out using function composition
        List<Task> forkedTasks = taskMapperContext.getDeciderService().getTasksToBeScheduled(workflowInstance, wft, retryCount);
        for (Task forkedTask : forkedTasks) {
            Map<String, Object> forkedTaskInput = tasksInput.get(forkedTask.getReferenceTaskName());
            forkedTask.getInputData().putAll(forkedTaskInput);
        }
        mappedTasks.addAll(forkedTasks);
        //Get the last of the dynamic tasks so that the join can be performed once this task is done
        Task last = forkedTasks.get(forkedTasks.size() - 1);
        joinOnTaskRefs.add(last.getReferenceTaskName());
    }

    //From the workflow definition get the next task and make sure that it is a JOIN task.
    //The dynamic fork tasks need to be followed by a join task
    WorkflowTask joinWorkflowTask = workflowInstance
            .getWorkflowDefinition()
            .getNextTask(taskToSchedule.getTaskReferenceName());

    if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) {
        throw new TerminateWorkflowException("Dynamic join definition is not followed by a join task.  Check the blueprint");
    }

    // Create Join task
    HashMap<String, Object> joinInput = new HashMap<>();
    joinInput.put("joinOn", joinOnTaskRefs);
    Task joinTask = createJoinTask(workflowInstance, joinWorkflowTask, joinInput);
    mappedTasks.add(joinTask);

    return mappedTasks;
}
 
Example 19
Source File: PaperOptions.java    From maze-harvester with GNU General Public License v3.0
private static double parseMargin(String specifier) throws ParseException {
  Pair<Double, String> parts = parseUnits(specifier);
  double scale = parts.getLeft();
  double measure = Double.parseDouble(parts.getRight());
  return measure * scale;
}
 
Example 20
Source File: ChannelAllocationState.java    From emodb with Apache License 2.0
public synchronized SlabAllocation allocate(PeekingIterator<Integer> eventSizes) {
    checkArgument(eventSizes.hasNext());

    if (!isAttached()) {
        return null;
    }

    int remaining = Constants.MAX_SLAB_SIZE - _slabConsumed;

    Pair<Integer, Integer> countAndBytesConsumed = DefaultSlabAllocator.defaultAllocationCount(_slabConsumed, _slabBytesConsumed, eventSizes);

    int offsetForNewAllocation = _slabConsumed;

    _slabConsumed += countAndBytesConsumed.getLeft();
    _slabBytesConsumed += countAndBytesConsumed.getRight();

    // Check for the case where no more slots could be allocated because the slab is full:
    // either the max # of slots or the max # of bytes has been consumed
    if (countAndBytesConsumed.getLeft() == 0) {
        detach().release();
        return null;
    }

    if (countAndBytesConsumed.getLeft() < remaining) {
        // All events fit in the current slab, leave it attached
        return new DefaultSlabAllocation(_slab.addRef(), offsetForNewAllocation, countAndBytesConsumed.getLeft());
    } else {
        // Whatever is left of this slab is consumed. Return the rest of the slab and detach from it so we'll allocate a new one next time.
        return new DefaultSlabAllocation(detach(), offsetForNewAllocation, countAndBytesConsumed.getLeft());
    }
}