Java Code Examples for org.apache.mesos.Protos#ExecutorInfo

The following examples show how to use org.apache.mesos.Protos#ExecutorInfo. Each example is taken from an open source project; the line above it identifies the source file, the project it comes from, and that project's license.
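
Before looking at the project-specific examples, it may help to see the shape of the API on its own. The snippet below is a minimal sketch, not taken from any of the projects listed here: it builds an ExecutorInfo with an ExecutorID, a display name, and a CommandInfo, which is the pattern most of the examples follow. The identifier, name, and command strings are placeholders.

import org.apache.mesos.Protos;

public class ExecutorInfoSketch {
    // Minimal sketch with placeholder values: an ExecutorInfo carries an ExecutorID plus,
    // for a command-based executor, a CommandInfo describing how the agent should start it.
    static Protos.ExecutorInfo minimalExecutorInfo() {
        return Protos.ExecutorInfo.newBuilder()
                .setExecutorId(Protos.ExecutorID.newBuilder().setValue("example-executor-id"))
                .setName("example-executor")
                .setCommand(Protos.CommandInfo.newBuilder().setValue("./run-executor.sh"))
                .build();
    }
}
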
Example 1
Source File: DefaultV3TaskInfoRequestFactory.java    From titus-control-plane with Apache License 2.0
private Protos.ExecutorInfo newExecutorInfo(Task task,
                                            Map<String, String> attributesMap,
                                            Optional<String> executorUriOverrideOpt) {

    boolean executorPerTask = attributesMap.containsKey(EXECUTOR_PER_TASK_LABEL);
    String executorName = LEGACY_EXECUTOR_NAME;
    String executorId = LEGACY_EXECUTOR_NAME;
    if (executorPerTask) {
        executorName = EXECUTOR_PER_TASK_EXECUTOR_NAME;
        executorId = EXECUTOR_PER_TASK_EXECUTOR_NAME + "-" + task.getId();
    }

    Protos.CommandInfo commandInfo = newCommandInfo(executorPerTask, executorUriOverrideOpt);
    return Protos.ExecutorInfo.newBuilder()
            .setExecutorId(Protos.ExecutorID.newBuilder().setValue(executorId).build())
            .setName(executorName)
            .setCommand(commandInfo)
            .build();
}
 
Example 2
Source File: MesosController.java    From twister2 with Apache License 2.0
public Protos.ExecutorInfo getExecutorInfo(String jobID, String executorName) {
  Protos.ExecutorInfo.Builder builder = Protos.ExecutorInfo.newBuilder();
  builder.setExecutorId(Protos.ExecutorID.newBuilder().setValue(executorName));
  builder.setCommand(getCommandInfo(jobID, executorName));
  builder.setName(executorName);
  return builder.build();
}
 
Example 3
Source File: OfferEvaluatorTest.java    From dcos-commons with Apache License 2.0
private Collection<Protos.Resource> getExpectedExecutorResources(Protos.ExecutorInfo executorInfo) {
    // Pull the resource IDs that were reserved for the executor's own cpus/mem/disk:
    String executorCpuId = executorInfo.getResourcesList().stream()
            .filter(r -> r.getName().equals("cpus"))
            .map(ResourceUtils::getResourceId)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst()
            .get();
    String executorMemId = executorInfo.getResourcesList().stream()
            .filter(r -> r.getName().equals("mem"))
            .map(ResourceUtils::getResourceId)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst()
            .get();
    String executorDiskId = executorInfo.getResourcesList().stream()
            .filter(r -> r.getName().equals("disk"))
            .map(ResourceUtils::getResourceId)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst()
            .get();

    Protos.Resource expectedExecutorCpu = ResourceTestUtils.getReservedCpus(0.1, executorCpuId);
    Protos.Resource expectedExecutorMem = ResourceTestUtils.getReservedMem(32, executorMemId);
    Protos.Resource expectedExecutorDisk = ResourceTestUtils.getReservedDisk(256, executorDiskId);

    return new ArrayList<>(Arrays.asList(expectedExecutorCpu, expectedExecutorMem, expectedExecutorDisk));
}
 
Example 4
Source File: ByteBufferSupport.java    From incubator-myriad with Apache License 2.0
public static Protos.ExecutorInfo toExecutorInfo(ByteBuffer bb) {
  int size = bb.getInt();
  if (size > 0) {
    try {
      return Protos.ExecutorInfo.parseFrom(getBytes(bb, size));
    } catch (Exception e) {
      throw new RuntimeException("ByteBuffer not in expected format, failed to parse ExecutorInfo bytes", e);
    }
  } else {
    return null;
  }
}
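
This helper only covers deserialization; the project's matching write path is not shown here. Assuming the usual protobuf toByteArray() method and the same length-prefixed layout, a hedged sketch of the inverse (the method name toByteBuffer is illustrative, not taken from incubator-myriad) could look like:

import java.nio.ByteBuffer;
import org.apache.mesos.Protos;

// Sketch only: write the message length followed by the raw protobuf bytes,
// so that toExecutorInfo() above can read the buffer back.
public static ByteBuffer toByteBuffer(Protos.ExecutorInfo executorInfo) {
  byte[] bytes = executorInfo.toByteArray();
  ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES + bytes.length);
  bb.putInt(bytes.length);
  bb.put(bytes);
  bb.flip(); // rewind so the buffer is ready for reading
  return bb;
}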
 
Example 5
Source File: CassandraExecutor.java    From dcos-cassandra-service with Apache License 2.0
@Override
public void registered(ExecutorDriver driver,
                       Protos.ExecutorInfo executorInfo,
                       Protos.FrameworkInfo frameworkInfo,
                       Protos.SlaveInfo slaveInfo) {
    cassandraTaskFactory = new CassandraTaskFactory(driver);
    customExecutor = new CustomExecutor(clusterJobExecutorService, cassandraTaskFactory);
}
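
The registered() callback above belongs to the org.apache.mesos.Executor interface, and the ExecutorInfo it receives is the one the scheduler attached when launching the task. As a rough sketch of how such an executor is wired into a driver (MyExecutor is a hypothetical class implementing Executor, not part of dcos-cassandra-service):

import org.apache.mesos.ExecutorDriver;
import org.apache.mesos.MesosExecutorDriver;
import org.apache.mesos.Protos;

public class ExecutorMain {
    public static void main(String[] args) {
        // MyExecutor is hypothetical; its registered() callback receives the Protos.ExecutorInfo.
        ExecutorDriver driver = new MesosExecutorDriver(new MyExecutor());
        Protos.Status status = driver.run(); // blocks until the executor is shut down
        System.exit(status == Protos.Status.DRIVER_STOPPED ? 0 : 1);
    }
}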
 
Example 6
Source File: DefaultSchedulerTest.java    From dcos-commons with Apache License 2.0
@Test
public void testLaunchTransient() throws Exception {
    Protos.Resource resource = ResourceTestUtils.getUnreservedCpus(3);
    Offer offer = OfferTestUtils.getCompleteOffer(resource);
    Protos.TaskInfo taskInfo = TaskTestUtils.getTaskInfo(resource);
    Protos.ExecutorInfo execInfo = Protos.ExecutorInfo.newBuilder().setExecutorId(TestConstants.EXECUTOR_ID).build();

    OfferRecommendation recommendationToLaunch = new LaunchOfferRecommendation(offer, taskInfo, execInfo);
    List<OfferRecommendation> allRecommendations = Arrays.asList(
            new StoreTaskInfoRecommendation(offer, taskInfo, execInfo),
            recommendationToLaunch,
            new StoreTaskInfoRecommendation(offer, taskInfo, execInfo),
            new StoreTaskInfoRecommendation(offer, taskInfo, execInfo));

    PlanScheduler mockPlanScheduler = mock(PlanScheduler.class);
    when(mockPlanScheduler.resourceOffers(any(), any())).thenReturn(allRecommendations);

    PersistentLaunchRecorder mockLaunchRecorder = mock(PersistentLaunchRecorder.class);
    UninstallRecorder mockDecommissionRecorder = mock(UninstallRecorder.class);

    Collection<OfferRecommendation> recommendations = DefaultScheduler.processOffers(
            LoggingUtils.getLogger(getClass()),
            mockPlanScheduler,
            mockLaunchRecorder,
            Optional.of(mockDecommissionRecorder),
            Collections.emptyList(),
            Collections.emptyList()).recommendations;
    Assert.assertEquals(Arrays.asList(
            null,
            Offer.Operation.Type.LAUNCH_GROUP,
            null,
            null),
            recommendations.stream()
                    .map(rec -> rec.getOperation().isPresent() ? rec.getOperation().get().getType() : null)
                    .collect(Collectors.toList()));
    Assert.assertEquals(allRecommendations, recommendations);

    // Meanwhile, ALL of the recommendations (including the three StoreTaskInfoRecommendations, which carry no launch operation) should have been passed to the recorders:
    verify(mockLaunchRecorder).record(allRecommendations);
    verify(mockDecommissionRecorder).recordDecommission(allRecommendations);
}
 
Example 7
Source File: OfferEvaluatorTest.java    From dcos-commons with Apache License 2.0
@Test
public void testLaunchMultipleTasksPerExecutor() throws Exception {
    Protos.Resource offeredResource = ResourceTestUtils.getUnreservedCpus(3.0);

    ResourceSet resourceSetA = DefaultResourceSet.newBuilder(TestConstants.ROLE, Constants.ANY_ROLE, TestConstants.PRINCIPAL)
            .cpus(1.0)
            .id("resourceSetA")
            .build();
    ResourceSet resourceSetB = DefaultResourceSet.newBuilder(TestConstants.ROLE, Constants.ANY_ROLE, TestConstants.PRINCIPAL)
            .cpus(2.0)
            .id("resourceSetB")
            .build();

    CommandSpec commandSpec = DefaultCommandSpec.newBuilder(Collections.emptyMap())
            .value("./cmd")
            .build();

    TaskSpec taskSpecA = DefaultTaskSpec.newBuilder()
            .name("taskA")
            .commandSpec(commandSpec)
            .goalState(GoalState.RUNNING)
            .resourceSet(resourceSetA)
            .build();
    TaskSpec taskSpecB = DefaultTaskSpec.newBuilder()
            .name("taskB")
            .commandSpec(commandSpec)
            .goalState(GoalState.RUNNING)
            .resourceSet(resourceSetB)
            .build();

    PodSpec podSpec =
            DefaultPodSpec.newBuilder(
                    TestConstants.POD_TYPE,
                    1,
                    Arrays.asList(taskSpecA, taskSpecB))
                    .build();

    PodInstance podInstance = new DefaultPodInstance(podSpec, 0);
    PodInstanceRequirement podInstanceRequirement =
            PodInstanceRequirement.newBuilder(podInstance, Arrays.asList("taskA", "taskB"))
                    .build();

    List<OfferRecommendation> recommendations = evaluator.evaluate(
            podInstanceRequirement,
            Arrays.asList(OfferTestUtils.getCompleteOffer(offeredResource)));

    Assert.assertEquals(Arrays.asList(
            Protos.Offer.Operation.Type.RESERVE,
            Protos.Offer.Operation.Type.RESERVE,
            Protos.Offer.Operation.Type.RESERVE,
            // Validate taskA operations
            Protos.Offer.Operation.Type.RESERVE,
            Protos.Offer.Operation.Type.LAUNCH_GROUP,
            null,
            // Validate taskB operations
            Protos.Offer.Operation.Type.RESERVE,
            Protos.Offer.Operation.Type.LAUNCH_GROUP,
            null),
            recommendations.stream()
                    .map(rec -> rec.getOperation().isPresent() ? rec.getOperation().get().getType() : null)
                    .collect(Collectors.toList()));

    // TaskInfo.executor is unset on the tasks inside the LAUNCH_GROUP operations; instead, the executor is set at the LaunchGroup level:

    Protos.Offer.Operation operation = recommendations.get(4).getOperation().get();

    Protos.TaskInfo launchTask = operation.getLaunchGroup().getTaskGroup().getTasks(0);
    Assert.assertFalse(launchTask.hasExecutor());
    Assert.assertEquals("pod-type-0-taskA", launchTask.getName());

    Assert.assertTrue(operation.getLaunchGroup().hasExecutor());
    Protos.ExecutorInfo launch0Executor = operation.getLaunchGroup().getExecutor();
    Assert.assertEquals("pod-type", launch0Executor.getName());

    operation = recommendations.get(7).getOperation().get();

    launchTask = operation.getLaunchGroup().getTaskGroup().getTasks(0);
    Assert.assertFalse(launchTask.hasExecutor());
    Assert.assertEquals("pod-type-0-taskB", launchTask.getName());

    Assert.assertTrue(operation.getLaunchGroup().hasExecutor());
    Protos.ExecutorInfo launch1Executor = operation.getLaunchGroup().getExecutor();
    Assert.assertEquals("pod-type", launch1Executor.getName());

    Assert.assertEquals(launch0Executor, launch1Executor);

    // Meanwhile, in the TaskInfos stored to the state store, TaskInfo.executor is set:

    Assert.assertEquals(launch0Executor, ((StoreTaskInfoRecommendation) recommendations.get(5)).getStateStoreTaskInfo().getExecutor());
    Assert.assertEquals(launch1Executor, ((StoreTaskInfoRecommendation) recommendations.get(8)).getStateStoreTaskInfo().getExecutor());
}
 
Example 8
Source File: OfferEvaluator.java    From dcos-commons with Apache License 2.0
private List<OfferEvaluationStage> getExistingEvaluationPipeline(
    PodInstanceRequirement podInstanceRequirement,
    Map<String, Protos.TaskInfo> podTasks,
    Collection<Protos.TaskInfo> allTasks,
    Protos.ExecutorInfo executorInfo,
    Optional<TLSEvaluationStage.Builder> tlsStageBuilder)
{
  List<OfferEvaluationStage> evaluationStages = new ArrayList<>();

  // TLS evaluation stages should be added for all tasks, regardless of the tasks-to-launch list, to ensure
  // ExecutorInfo equality when launching new tasks.
  if (tlsStageBuilder.isPresent()) {
    for (TaskSpec taskSpec : podInstanceRequirement.getPodInstance().getPod().getTasks()) {
      if (!taskSpec.getTransportEncryption().isEmpty()) {
        evaluationStages.add(tlsStageBuilder.get().build(taskSpec.getName()));
      }
    }
  }

  if (podInstanceRequirement.getPodInstance().getPod().getPlacementRule().isPresent() &&
      podInstanceRequirement.getRecoveryType().equals(RecoveryType.PERMANENT))
  {
    // If a "pod replace" was issued, ensure that the pod's new location follows any placement rules.
    evaluationStages.add(new PlacementRuleEvaluationStage(
        allTasks, podInstanceRequirement.getPodInstance().getPod().getPlacementRule().get()));
  }

  // Select an arbitrary ResourceSpec from the pod definition to get the role and principal.
  // All ResourceSpecs in a pod share the same role/principal, see YAMLToInternalMappers.
  ResourceSpec resourceSpecForRoleAndPrincipal =
      podInstanceRequirement.getPodInstance().getPod().getTasks().stream()
          .map(taskSpec -> taskSpec.getResourceSet().getResources())
          .filter(resourceSpecs -> !resourceSpecs.isEmpty())
          .findAny()
          .get()
          .iterator().next();
  // Add evaluation for the executor's own resources:
  ExecutorResourceMapper executorResourceMapper = new ExecutorResourceMapper(
      podInstanceRequirement.getPodInstance().getPod(),
      getExecutorResourceSpecs(
          schedulerConfig,
          resourceSpecForRoleAndPrincipal.getRole(),
          resourceSpecForRoleAndPrincipal.getPrincipal(),
          resourceSpecForRoleAndPrincipal.getPreReservedRole()),
      executorInfo.getResourcesList(),
      resourceNamespace,
      frameworkId);
  executorResourceMapper.getOrphanedResources()
      .forEach(resource -> evaluationStages.add(new DestroyEvaluationStage(resource)));
  executorResourceMapper.getOrphanedResources()
      .forEach(resource -> evaluationStages.add(new UnreserveEvaluationStage(resource)));
  evaluationStages.addAll(executorResourceMapper.getEvaluationStages());

  // Evaluate any changes to the task(s):
  evaluationStages.addAll(getExistingTaskEvaluationPipeline(
      podInstanceRequirement, serviceName, resourceNamespace, podTasks, frameworkId));

  return evaluationStages;
}
 
Example 9
Source File: OfferEvaluator.java    From dcos-commons with Apache License 2.0
public List<OfferEvaluationStage> getEvaluationPipeline(
    PodInstanceRequirement podInstanceRequirement,
    Collection<Protos.TaskInfo> allTasks,
    Map<String, Protos.TaskInfo> thisPodTasks) throws IOException
{

  boolean noLaunchedTasksExist = thisPodTasks.values().stream()
      .flatMap(taskInfo -> taskInfo.getResourcesList().stream())
      .map(ResourceUtils::getResourceId)
      .filter(Optional::isPresent)
      .map(Optional::get)
      .allMatch(String::isEmpty);

  boolean allTasksPermanentlyFailed = thisPodTasks.size() > 0 &&
      thisPodTasks.values().stream().allMatch(FailureUtils::isPermanentlyFailed);

  final String description;
  final boolean shouldGetNewRequirement;
  if (podInstanceRequirement.getRecoveryType().equals(RecoveryType.PERMANENT) || allTasksPermanentlyFailed) {
    description = "failed";
    shouldGetNewRequirement = true;
  } else if (noLaunchedTasksExist) {
    description = "new";
    shouldGetNewRequirement = true;
  } else {
    description = "existing";
    shouldGetNewRequirement = false;
  }
  logger.info("Generating requirement for {} pod '{}' containing tasks: {}",
      description,
      podInstanceRequirement.getPodInstance().getName(),
      podInstanceRequirement.getTasksToLaunch());

  // Only create a TLS Evaluation Stage builder if the service actually uses TLS certs.
  // This avoids performing TLS cert generation in cases where the cluster may not support it (e.g. DC/OS Open).
  boolean anyTasksWithTLS = podInstanceRequirement.getPodInstance().getPod().getTasks().stream()
      .anyMatch(taskSpec -> !taskSpec.getTransportEncryption().isEmpty());
  Optional<TLSEvaluationStage.Builder> tlsStageBuilder = anyTasksWithTLS
      ? Optional.of(new TLSEvaluationStage.Builder(serviceName, schedulerConfig))
      : Optional.empty();

  List<OfferEvaluationStage> evaluationPipeline = new ArrayList<>();
  if (shouldGetNewRequirement) {
    evaluationPipeline.addAll(getNewEvaluationPipeline(podInstanceRequirement, allTasks, tlsStageBuilder));
  } else {
    Protos.ExecutorInfo executorInfo = getExecutorInfo(podInstanceRequirement, thisPodTasks.values());

    // An empty ExecutorID indicates that we should use a new Executor; otherwise we attempt to launch
    // tasks on an already running Executor.
    String executorIdString = executorInfo.getExecutorId().getValue();
    Optional<Protos.ExecutorID> executorID = executorIdString.isEmpty() ?
        Optional.empty() :
        Optional.of(executorInfo.getExecutorId());

    evaluationPipeline.add(new ExecutorEvaluationStage(serviceName, executorID));
    evaluationPipeline.addAll(getExistingEvaluationPipeline(
        podInstanceRequirement, thisPodTasks, allTasks, executorInfo, tlsStageBuilder));
  }

  return evaluationPipeline;
}
 
Example 10
Source File: TaskInfoBuilder.java    From logstash with Apache License 2.0
private Protos.TaskInfo buildDockerTask(String taskId, Protos.Offer offer) {
    String executorImage = logstashConfig.getExecutorImage() + ":" + logstashConfig.getExecutorVersion();

    Protos.ContainerInfo.DockerInfo.Builder dockerExecutor = Protos.ContainerInfo.DockerInfo
            .newBuilder()
            .setForcePullImage(false)
            .setNetwork(Protos.ContainerInfo.DockerInfo.Network.BRIDGE)
            .setImage(executorImage);

    if (features.isSyslog()) {
        dockerExecutor.addPortMappings(Protos.ContainerInfo.DockerInfo.PortMapping.newBuilder()
                .setHostPort(logstashConfig.getSyslogPort())
                .setContainerPort(logstashConfig.getSyslogPort())
                .setProtocol("udp"));
    }
    if (features.isCollectd()) {
        dockerExecutor.addPortMappings(Protos.ContainerInfo.DockerInfo.PortMapping.newBuilder()
                .setHostPort(logstashConfig.getCollectdPort())
                .setContainerPort(logstashConfig.getCollectdPort())
                .setProtocol("udp"));
    }

    Protos.ContainerInfo.Builder container = Protos.ContainerInfo.newBuilder()
            .setType(Protos.ContainerInfo.Type.DOCKER)
            .setDocker(dockerExecutor.build());
    if (features.isFile()) {
        container.addVolumes(Protos.Volume.newBuilder()
                .setHostPath("/")
                .setContainerPath("/logstashpaths")
                .setMode(Protos.Volume.Mode.RO)
                .build());
    }

    ExecutorEnvironmentalVariables executorEnvVars = new ExecutorEnvironmentalVariables(
            executorConfig, logstashConfig);
    executorEnvVars.addToList(ExecutorEnvironmentalVariables.LOGSTASH_PATH, "/opt/logstash/bin/logstash");

    Protos.ExecutorInfo executorInfo = Protos.ExecutorInfo.newBuilder()
            .setName(LogstashConstants.NODE_NAME + " executor")
            .setExecutorId(Protos.ExecutorID.newBuilder().setValue("executor." + UUID.randomUUID()))
            .setContainer(container)
            .setCommand(Protos.CommandInfo.newBuilder()
                    .addArguments("dummyArgument")
                    .setContainer(Protos.CommandInfo.ContainerInfo.newBuilder()
                            .setImage(executorImage).build())
                    .setEnvironment(Protos.Environment.newBuilder()
                            .addAllVariables(executorEnvVars.getList()))
                    .setShell(false))
            .build();

    return createTask(taskId, offer, executorInfo);
}
 
Example 11
Source File: ClusterTaskOfferRequirementProviderTest.java    From dcos-cassandra-service with Apache License 2.0
@Test
public void testGetNewOfferRequirement() throws Exception {
    OfferRequirement requirement = provider.getNewOfferRequirement(
            CassandraTask.TYPE.CASSANDRA_DAEMON.name(),
            testTaskInfo);
    Protos.TaskInfo taskInfo = requirement.getTaskRequirements().iterator().next().getTaskInfo();
    Assert.assertEquals(taskInfo.getName(), "test-daemon");
    Assert.assertTrue(taskInfo.getTaskId().getValue().contains("test-daemon"));
    Assert.assertEquals(taskInfo.getSlaveId().getValue(), "");

    List<Protos.Resource> resources = taskInfo.getResourcesList();
    Assert.assertEquals(4, resources.size());

    Protos.Resource cpusResource = resources.get(0);
    Assert.assertEquals("cpus", cpusResource.getName());
    Assert.assertEquals(Protos.Value.Type.SCALAR, cpusResource.getType());
    Assert.assertEquals(testCpus, cpusResource.getScalar().getValue(), 0.0);
    Assert.assertEquals(testRole, cpusResource.getRole());
    Assert.assertEquals(testPrincipal, cpusResource.getReservation().getPrincipal());
    Assert.assertEquals("resource_id", cpusResource.getReservation().getLabels().getLabelsList().get(0).getKey());
    Assert.assertEquals(testResourceId, cpusResource.getReservation().getLabels().getLabelsList().get(0).getValue());

    Protos.Resource memResource = resources.get(1);
    Assert.assertEquals("mem", memResource.getName());
    Assert.assertEquals(Protos.Value.Type.SCALAR, memResource.getType());
    Assert.assertEquals(testMem, memResource.getScalar().getValue(), 0.0);
    Assert.assertEquals(testRole, memResource.getRole());
    Assert.assertEquals(testPrincipal, memResource.getReservation().getPrincipal());
    Assert.assertEquals("resource_id", memResource.getReservation().getLabels().getLabelsList().get(0).getKey());
    Assert.assertEquals(testResourceId, memResource.getReservation().getLabels().getLabelsList().get(0).getValue());

    Protos.Resource diskResource = resources.get(2);
    Assert.assertEquals("disk", diskResource.getName());
    Assert.assertEquals(Protos.Value.Type.SCALAR, diskResource.getType());
    Assert.assertEquals(testDisk, diskResource.getScalar().getValue(), 0.0);
    Assert.assertEquals(testRole, diskResource.getRole());
    Assert.assertEquals(testPrincipal, diskResource.getReservation().getPrincipal());
    Assert.assertEquals("resource_id", diskResource.getReservation().getLabels().getLabelsList().get(0).getKey());
    Assert.assertEquals(testResourceId, diskResource.getReservation().getLabels().getLabelsList().get(0).getValue());

    Protos.Resource portsResource = resources.get(3);
    Assert.assertEquals("ports", portsResource.getName());
    Assert.assertEquals(Protos.Value.Type.RANGES, portsResource.getType());
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getBegin() >= testPortBegin);
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getEnd() >= testPortBegin);
    Assert.assertEquals(testRole, portsResource.getRole());
    Assert.assertEquals(testPrincipal, portsResource.getReservation().getPrincipal());
    Assert.assertEquals("resource_id", portsResource.getReservation().getLabels().getLabelsList().get(0).getKey());
    Assert.assertEquals(testResourceId, portsResource.getReservation().getLabels().getLabelsList().get(0).getValue());

    final Protos.ExecutorInfo executorInfo = requirement.getExecutorRequirementOptional().get().getExecutorInfo();

    Protos.CommandInfo cmd = executorInfo.getCommand();
    Assert.assertEquals(4, cmd.getUrisList().size());

    List<Protos.CommandInfo.URI> urisList = new ArrayList<>(cmd.getUrisList());
    urisList.sort((a, b) -> a.getValue().compareTo(b.getValue()));
    Assert.assertEquals(
        config.getExecutorConfig().getLibmesosLocation().toString(),
        urisList.get(0).getValue());
    Assert.assertEquals(
        config.getExecutorConfig().getJreLocation().toString(),
        urisList.get(1).getValue());
    Assert.assertEquals(
        config.getExecutorConfig().getCassandraLocation().toString(),
        urisList.get(2).getValue());
    Assert.assertEquals(
        config.getExecutorConfig().getExecutorLocation().toString(),
        urisList.get(3).getValue());
}
 
Example 12
Source File: TaskInfoBuilder.java    From logstash with Apache License 2.0
private Protos.TaskInfo createTask(String taskId, Protos.Offer offer, Protos.ExecutorInfo executorInfo) {
    ExecutorBootConfiguration bootConfiguration = new ExecutorBootConfiguration(offer.getSlaveId().getValue());

    bootConfiguration.setElasticSearchHosts(logstashConfig.getElasticsearchHost());

    try {
        String template = StreamUtils.copyToString(
                Optional.ofNullable(logstashConfig.getConfigFile())
                        .map(file -> (InputStream) getFileInputStream(file))
                        .orElseGet(() -> getClass().getResourceAsStream("/default_logstash.config.fm")), StandardCharsets.UTF_8);

        LOGGER.debug("Template: " + template);
        bootConfiguration.setLogstashConfigTemplate(template);
        bootConfiguration.setLogstashStartConfigTemplate(StreamUtils.copyToString(getClass().getResourceAsStream("/startup_logstash.config.fm"), StandardCharsets.UTF_8));
    } catch (IOException e) {
        throw new RuntimeException("Failed to open template", e);
    }

    if (features.isSyslog()) {
        bootConfiguration.setEnableSyslog(true);
        bootConfiguration.setSyslogPort(logstashConfig.getSyslogPort());
    }
    if (features.isCollectd()) {
        bootConfiguration.setEnableCollectd(true);
        bootConfiguration.setCollectdPort(logstashConfig.getCollectdPort());
    }

    if (features.isFile()) {
        bootConfiguration.setEnableFile(true);
        bootConfiguration.setFilePaths(executorConfig.getFilePath().stream().toArray(String[]::new));
    }

    return Protos.TaskInfo.newBuilder()
            .setExecutor(executorInfo)
            .addAllResources(getResourcesList())
            .setName(LogstashConstants.TASK_NAME)
            .setTaskId(Protos.TaskID.newBuilder().setValue(taskId))
            .setSlaveId(offer.getSlaveId())
            .setData(ByteString.copyFrom(SerializationUtils.serialize(bootConfiguration)))
            .build();
}
 
Example 13
Source File: TaskTestUtils.java    From dcos-commons with Apache License 2.0
public static Protos.ExecutorInfo getExecutorInfo(Protos.Resource resource) {
    return getExecutorInfo(Arrays.asList(resource));
}
 
Example 14
Source File: OfferEvaluatorVolumesTest.java    From dcos-commons with Apache License 2.0
@Test
public void testExpectedMountVolume() throws Exception {
    // Launch for the first time.
    Resource offeredCpuResource = ResourceTestUtils.getUnreservedCpus(1.0);
    Resource offeredDiskResource = ResourceTestUtils.getUnreservedMountVolume(2000, Optional.empty());

    PodInstanceRequirement podInstanceRequirement =
            PodInstanceRequirementTestUtils.getMountVolumeRequirement(1.0, 1500);
    List<OfferRecommendation> recommendations = evaluator.evaluate(
            podInstanceRequirement,
            Arrays.asList(OfferTestUtils.getCompleteOffer(Arrays.asList(offeredDiskResource, offeredCpuResource))));

    String executorCpuResourceId = ResourceTestUtils.getResourceId(
            recommendations.get(0).getOperation().get().getReserve().getResources(0));
    String executorDiskResourceId = ResourceTestUtils.getResourceId(
            recommendations.get(1).getOperation().get().getReserve().getResources(0));
    String executorMemResourceId = ResourceTestUtils.getResourceId(
            recommendations.get(2).getOperation().get().getReserve().getResources(0));
    String cpuResourceId = ResourceTestUtils.getResourceId(
            recommendations.get(3).getOperation().get().getReserve().getResources(0));
    Resource createResource = recommendations.get(5).getOperation().get().getCreate().getVolumes(0);

    String diskResourceId = ResourceTestUtils.getResourceId(createResource);
    String persistenceId = ResourceTestUtils.getPersistenceId(createResource);

    // Last entry is a StoreTaskInfoRecommendation, which doesn't have an Operation:
    Operation launchOperation = recommendations.get(recommendations.size() - 2).getOperation().get();
    Protos.ExecutorInfo executorInfo = launchOperation.getLaunchGroup().getExecutor();
    Collection<Protos.TaskInfo> taskInfos = launchOperation.getLaunchGroup().getTaskGroup().getTasksList().stream()
            .map(t -> t.toBuilder().setExecutor(executorInfo).build())
            .collect(Collectors.toList());
    stateStore.storeTasks(taskInfos);


    // Launch again on expected resources.
    Resource expectedCpu = ResourceTestUtils.getReservedCpus(1.0, cpuResourceId);
    Resource expectedDisk =
            ResourceTestUtils.getReservedMountVolume(2000, Optional.empty(), diskResourceId, persistenceId);
    Resource expectedExecutorCpu = ResourceTestUtils.getReservedCpus(0.1, executorCpuResourceId);
    Resource expectedExecutorMem = ResourceTestUtils.getReservedMem(32, executorMemResourceId);
    Resource expectedExecutorDisk = ResourceTestUtils.getReservedDisk(256, executorDiskResourceId);
    recommendations = evaluator.evaluate(
            podInstanceRequirement,
            Arrays.asList(OfferTestUtils.getCompleteOffer(Arrays.asList(
                    expectedCpu, expectedDisk, expectedExecutorCpu, expectedExecutorMem, expectedExecutorDisk))));

    // Launch + StoreTask:
    Assert.assertEquals(2, recommendations.size());

    launchOperation = recommendations.get(0).getOperation().get();
    Protos.TaskInfo launchTask = launchOperation.getLaunchGroup().getTaskGroup().getTasks(0);
    Assert.assertEquals(recommendations.toString(), 2, launchTask.getResourcesCount());
    Resource launchResource = launchTask.getResources(1);

    Assert.assertEquals(Operation.Type.LAUNCH_GROUP, launchOperation.getType());
    Assert.assertEquals(2000, launchResource.getScalar().getValue(), 0.0);
    OfferEvaluatorTest.validateRole(launchResource);
    Assert.assertEquals(TestConstants.ROLE, ResourceUtils.getRole(launchResource));
    Assert.assertEquals(TestConstants.MOUNT_DISK_SOURCE, launchResource.getDisk().getSource());
    Assert.assertEquals(persistenceId, launchResource.getDisk().getPersistence().getId());
    Assert.assertEquals(TestConstants.PRINCIPAL, launchResource.getDisk().getPersistence().getPrincipal());
    validatePrincipal(launchResource);
    Assert.assertEquals(diskResourceId, getResourceId(launchResource));

    Assert.assertFalse(recommendations.get(1).getOperation().isPresent());
}
 
Example 15
Source File: NodeTask.java    From incubator-myriad with Apache License 2.0
public void setExecutorInfo(Protos.ExecutorInfo executorInfo) {
  this.executorInfo = executorInfo;
}
 
Example 16
Source File: TaskTestUtils.java    From dcos-commons with Apache License 2.0
private static Protos.ExecutorInfo getExecutorInfo(List<Protos.Resource> resources) {
    return getExecutorInfoBuilder().addAllResources(resources).build();
}
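
The getExecutorInfoBuilder() helper used above is not reproduced in this listing. Based on the other dcos-commons snippets on this page, a hedged sketch of what such a helper might look like is shown below; TestConstants.EXECUTOR_ID also appears in Example 6, but the name and command values here are assumptions for illustration only.

// Sketch only, not the actual dcos-commons helper:
private static Protos.ExecutorInfo.Builder getExecutorInfoBuilder() {
    return Protos.ExecutorInfo.newBuilder()
            .setExecutorId(TestConstants.EXECUTOR_ID)   // constant seen in Example 6
            .setName("test-executor")                   // assumed name
            .setCommand(Protos.CommandInfo.newBuilder()
                    .setValue("./executor"));           // assumed command
}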
 
Example 17
Source File: CassandraTaskExecutor.java    From dcos-cassandra-service with Apache License 2.0
CassandraTaskExecutor(final Protos.ExecutorInfo info) {
    this.info = info;
}
 
Example 18
Source File: LaunchedTask.java    From dcos-commons with Apache License 2.0
public LaunchedTask(Protos.ExecutorInfo executorInfo, Protos.TaskInfo taskInfo) {
  this.executorInfo = executorInfo;
  this.taskInfo = taskInfo;
}
 
Example 19
Source File: DefaultV3TaskInfoRequestFactory.java    From titus-control-plane with Apache License 2.0
private Protos.TaskInfo.Builder newTaskInfoBuilder(Protos.TaskID taskId,
                                                   Protos.ExecutorInfo executorInfo,
                                                   Protos.SlaveID slaveID,
                                                   TitusQueuableTask<Job, Task> fenzoTask,
                                                   Job<?> job,
                                                   Task task
) {

    // use requested CPUs rather than what Fenzo assigned, since some CPUs could have been scheduled
    // opportunistically (oversubscribed)
    double requestedCpus = job.getJobDescriptor().getContainer().getContainerResources().getCpu();
    Protos.TaskInfo.Builder builder = Protos.TaskInfo.newBuilder()
            .setTaskId(taskId)
            .setName(taskId.getValue())
            .setExecutor(executorInfo)
            .setSlaveId(slaveID)
            .addResources(Protos.Resource.newBuilder()
                    .setName("cpus")
                    .setType(Protos.Value.Type.SCALAR)
                    .setScalar(Protos.Value.Scalar.newBuilder().setValue(requestedCpus).build()))
            .addResources(Protos.Resource.newBuilder()
                    .setName("mem")
                    .setType(Protos.Value.Type.SCALAR)
                    .setScalar(Protos.Value.Scalar.newBuilder().setValue(fenzoTask.getMemory()).build()))
            .addResources(Protos.Resource.newBuilder()
                    .setName("disk")
                    .setType(Protos.Value.Type.SCALAR)
                    .setScalar(Protos.Value.Scalar.newBuilder().setValue(fenzoTask.getDisk()).build()))
            .addResources(Protos.Resource.newBuilder()
                    .setName("network")
                    .setType(Protos.Value.Type.SCALAR)
                    .setScalar(Protos.Value.Scalar.newBuilder().setValue(fenzoTask.getNetworkMbps())));

    if (fenzoTask.isCpuOpportunistic() && fenzoTask.getOpportunisticCpus() > 0) {
        builder.addResources(Protos.Resource.newBuilder()
                .setName("opportunisticCpus")
                .setType(Protos.Value.Type.SCALAR)
                .setScalar(Protos.Value.Scalar.newBuilder().setValue(fenzoTask.getOpportunisticCpus()).build())
        );
    }

    // set scalars other than cpus, mem, disk
    final Map<String, Double> scalars = fenzoTask.getScalarRequests();
    if (scalars != null && !scalars.isEmpty()) {
        for (Map.Entry<String, Double> entry : scalars.entrySet()) {
            if (!Container.PRIMARY_RESOURCES.contains(entry.getKey())) { // Already set above
                builder.addResources(Protos.Resource.newBuilder()
                        .setName(entry.getKey())
                        .setType(Protos.Value.Type.SCALAR)
                        .setScalar(Protos.Value.Scalar.newBuilder().setValue(entry.getValue()).build())
                );
            }
        }
    }

    return builder;
}
 
Example 20
Source File: Expect.java    From dcos-commons with Apache License 2.0
/**
 * Verifies that a pod was launched with exactly the provided task names over the last N accept calls. If the last
 * offer cycle had multiple offers from different agents, then separate accept calls are made on a per-agent basis.
 */
public static Expect launchedTasks(int acceptsToCheck, Collection<String> taskNames) {
  return new Expect() {
    // Use this form instead of using ArgumentCaptor.forClass() to avoid problems with typecasting generics:
    @Captor
    private ArgumentCaptor<Collection<Protos.OfferID>> offerIdsCaptor;

    @Captor
    private ArgumentCaptor<Collection<Protos.Offer.Operation>> operationsCaptor;

    @Override
    public void expect(ClusterState state, SchedulerDriver mockDriver) {
      MockitoAnnotations.initMocks(this);

      // Get the params from the last N accept calls:
      Mockito.verify(mockDriver, Mockito.atLeast(acceptsToCheck))
          .acceptOffers(offerIdsCaptor.capture(), operationsCaptor.capture(), any());
      // With the above retrieval, we will have >=acceptsToCheck calls in forward chronological order.
      // We need to manually cut that down to just the LAST acceptsToCheck calls:
      List<Collection<Protos.OfferID>> allOfferIdAcceptCalls = offerIdsCaptor.getAllValues();
      Collection<String> acceptedOfferIds = allOfferIdAcceptCalls
          .subList(allOfferIdAcceptCalls.size() - acceptsToCheck, allOfferIdAcceptCalls.size())
          .stream()
          .flatMap(Collection::stream)
          .map(Protos.OfferID::getValue)
          .collect(Collectors.toList());

      List<Collection<Protos.Offer.Operation>> allOperationAcceptCalls = operationsCaptor.getAllValues();
      List<Collection<Protos.Offer.Operation>> selectedOperationAcceptCalls = allOperationAcceptCalls
          .subList(allOperationAcceptCalls.size() - acceptsToCheck, allOperationAcceptCalls.size());

      // As a sanity check, verify that the accepted ids were all from the most recent offer cycle. This
      // ensures that we aren't looking at accepted offers from a prior offer cycle.
      Set<String> lastCycleOfferIds = state.getLastOfferCycle().stream()
          .map(o -> o.getId().getValue())
          .collect(Collectors.toSet());
      Assert.assertTrue(String.format(
          "Expected last accepted offer in last offer cycle: %s, but last %d accepted %s %s",
          lastCycleOfferIds,
          acceptsToCheck,
          acceptsToCheck == 1 ? "offer was" : "offers were",
          acceptedOfferIds),
          lastCycleOfferIds.containsAll(acceptedOfferIds));

      // Check (and capture) task launch operations:
      Collection<String> launchedTaskNames = new ArrayList<>();
      // Iterate over acceptOffers() calls, one per agent:
      for (Collection<Protos.Offer.Operation> acceptCallOperations : selectedOperationAcceptCalls) {
        // A single acceptOffers() call may contain multiple LAUNCH_GROUP operations.
        // We want to ensure they're all counted as a unit when tallying the pod.
        // TODO(nickbp): DCOS-37508 We currently produce multiple LAUNCH_GROUPs (each with identical copies
        // of the same ExecutorInfo) when launching multiple tasks in a pod. As a temporary measure, this
        // de-dupes executors by their ExecutorID. Remove this de-dupe once DCOS-37508 is fixed.
        Map<String, Protos.ExecutorInfo> executorsById = new HashMap<>();
        Collection<Protos.TaskInfo> launchedTaskInfos = new ArrayList<>();
        Collection<Protos.Resource> reservedResources = new ArrayList<>();
        for (Protos.Offer.Operation operation : acceptCallOperations) {
          switch (operation.getType()) {
            case LAUNCH_GROUP: {
              Protos.ExecutorInfo executor = operation.getLaunchGroup().getExecutor();
              executorsById.put(executor.getExecutorId().getValue(), executor);

              Collection<Protos.TaskInfo> taskInfos =
                  operation.getLaunchGroup().getTaskGroup().getTasksList();

              launchedTaskNames.addAll(taskInfos.stream()
                  .map(task -> task.getName())
                  .collect(Collectors.toList()));
              launchedTaskInfos.addAll(taskInfos);
              break;
            }
            case RESERVE:
              reservedResources.addAll(operation.getReserve().getResourcesList());
              break;
            default:
              break;
          }
        }
        // Record the accept operation if anything happened:
        if (!executorsById.isEmpty() || !launchedTaskInfos.isEmpty() || !reservedResources.isEmpty()) {
          state.addAcceptCall(
              new AcceptEntry(executorsById.values(), launchedTaskInfos, reservedResources));
        }
      }

      // Finally, verify that exactly the expected tasks were launched across these acceptOffers() calls:
      Assert.assertTrue(
          String.format("Expected launched tasks: %s, got tasks: %s", taskNames, launchedTaskNames),
          launchedTaskNames.containsAll(taskNames) && taskNames.containsAll(launchedTaskNames));
    }

    @Override
    public String getDescription() {
      return String.format("Tasks were launched into a pod: %s", taskNames);
    }
  };
}