Java Code Examples for com.datatorrent.stram.plan.logical.LogicalPlan#setAttribute()

The following examples show how to use com.datatorrent.stram.plan.logical.LogicalPlan#setAttribute(). They are taken from the open-source Apache Apex core (attic-apex-core) and Bats projects; the source file and license are noted above each example.
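Before the project examples, the sketch below shows the basic pattern they all share: create a LogicalPlan and call setAttribute() on it before the DAG is launched or translated into a physical plan. This is a minimal illustrative sketch, not taken from any of the projects; the application name "demoApp" is a placeholder, and the import for StramTestSupport mirrors the test support class used in the examples below (it lives in the apex-core test sources and may not be on your classpath).

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.stram.plan.logical.LogicalPlan;
import com.datatorrent.stram.support.StramTestSupport;

public class SetAttributeSketch
{
  public static void main(String[] args)
  {
    LogicalPlan dag = new LogicalPlan();

    // DAG-level attributes are keyed by constants inherited from the DAG context.
    dag.setAttribute(LogicalPlan.APPLICATION_NAME, "demoApp"); // placeholder name
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 2);

    // Operator-scoped attributes can also be set DAG-wide as defaults,
    // e.g. the storage agent used for checkpointing, as in the tests below.
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  }
}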
Example 1
Source File: AtLeastOnceTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testInputOperatorRecovery() throws Exception
{
  RecoverableInputOperator.initGenTuples();
  CollectorOperator.collection.clear();
  int maxTuples = 30;
  LogicalPlan dag = new LogicalPlan();
  String workingDir = new File("target/testInputOperatorRecovery").getAbsolutePath();
  AsyncFSStorageAgent asyncFSStorageAgent = new AsyncFSStorageAgent(workingDir, null);
  asyncFSStorageAgent.setSyncCheckpoint(true);
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, asyncFSStorageAgent);
  dag.getAttributes().put(LogicalPlan.CHECKPOINT_WINDOW_COUNT, 2);
  dag.getAttributes().put(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS, 300);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 1);
  RecoverableInputOperator rip = dag.addOperator("LongGenerator", RecoverableInputOperator.class);
  rip.setMaximumTuples(maxTuples);
  rip.setSimulateFailure(true);

  CollectorOperator cm = dag.addOperator("LongCollector", CollectorOperator.class);
  dag.addStream("connection", rip.output, cm.input);

  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.run();

  Assert.assertEquals("Generated Outputs", maxTuples, CollectorOperator.collection.size());
}
 
Example 2
Source File: StramAppLauncher.java    From Bats with Apache License 2.0
private void setTokenRefreshCredentials(LogicalPlan dag, Configuration conf) throws IOException
{
  String principal = conf.get(StramClientUtils.TOKEN_REFRESH_PRINCIPAL, StramUserLogin.getPrincipal());
  String keytabPath = conf.get(StramClientUtils.TOKEN_REFRESH_KEYTAB, conf.get(StramClientUtils.KEY_TAB_FILE));
  if (keytabPath == null) {
    String keytab = StramUserLogin.getKeytab();
    if (keytab != null) {
      Path localKeyTabPath = new Path(keytab);
      try (FileSystem fs = StramClientUtils.newFileSystemInstance(conf)) {
        Path destPath = new Path(StramClientUtils.getApexDFSRootDir(fs, conf), localKeyTabPath.getName());
        if (!fs.exists(destPath)) {
          fs.copyFromLocalFile(false, false, localKeyTabPath, destPath);
        }
        keytabPath = destPath.toString();
      }
    }
  }
  LOG.debug("User principal is {}, keytab is {}", principal, keytabPath);
  if ((principal != null) && (keytabPath != null)) {
    dag.setAttribute(LogicalPlan.PRINCIPAL, principal);
    dag.setAttribute(LogicalPlan.KEY_TAB_FILE, keytabPath);
  } else {
    LOG.warn("Credentials for refreshing tokens not available, application may not be able to run indefinitely");
  }
}
 
Example 3
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testContainerSizeWithPartitioning()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 5, plan.getContainers().size());
  PTContainer container;
  for (int i = 0; i < 5; i++) {
    container = plan.getContainers().get(i);
    if (container.getOperators().size() == 1) {
      Assert.assertEquals("container memory is 1536 for container :" + container, 1536, container.getRequiredMemoryMB());
    }
    if (container.getOperators().size() == 2) {
      Assert.assertEquals("container memory is 2048 for container :" + container, 2048, container.getRequiredMemoryMB());
    }
  }
}
 
Example 4
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testMxNPartitionForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.getOperatorMeta("o1").getMeta(o1.outport1).getUnifierMeta().getAttributes().put(OperatorContext.MEMORY_MB, 1024);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.setOperatorAttribute(o2, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o2, OperatorContext.APPLICATION_WINDOW_COUNT, 4);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 9, plan.getContainers().size());
}
 
Example 5
Source File: StramMiniClusterTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testAddAttributeToArgs() throws Exception
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_NAME, APP_NAME);
  AddAttributeToArgsOperator operator = dag.addOperator("test", AddAttributeToArgsOperator.class);
  dag.getContextAttributes(operator).put(OperatorContext.RECOVERY_ATTEMPTS, 0);

  StramClient client = new StramClient(conf, dag);
  try {
    client.start();
    client.startApplication();
    Assert.assertTrue(client.monitorApplication());
  } finally {
    client.stop();
  }
}
 
Example 6
Source File: OiOEndWindowTest.java    From attic-apex-core with Apache License 2.0
@Test
public void validateOiOImplementation() throws Exception
{
  LogicalPlan lp = new LogicalPlan();
  String workingDir = new File("target/validateOiOImplementation").getAbsolutePath();
  lp.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(workingDir, null));
  TestInputOperator io = lp.addOperator("Input Operator", new TestInputOperator());
  FirstGenericOperator go = lp.addOperator("First Generic Operator", new FirstGenericOperator());
  SecondGenericOperator out = lp.addOperator("Second Generic Operator", new SecondGenericOperator());

  /*
   * This test makes sure that even if the application_window_count is different, endWindow() is called for
   * end_stream
   */
  lp.getOperatorMeta("Second Generic Operator").getAttributes().put(Context.OperatorContext.APPLICATION_WINDOW_COUNT, 2);
  StreamMeta stream = lp.addStream("Stream", io.output, go.input);
  StreamMeta stream1 = lp.addStream("Stream1", go.output, out.input);

  stream1.setLocality(Locality.THREAD_LOCAL);
  lp.validate();
  StramLocalCluster slc = new StramLocalCluster(lp);
  slc.run();
  Assert.assertEquals("End Window Count", FirstGenericOperator.endwindowCount, SecondGenericOperator.endwindowCount);
}
 
Example 7
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testNumberOfUnifiers()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport1);
  dag.setOperatorAttribute(node1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(5));
  dag.setOutputPortAttribute(node1.outport1, PortContext.UNIFIER_LIMIT, 3);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTContainer> containers = plan.getContainers();
  int unifierCount = 0;
  int totalOperators = 0;
  for (PTContainer container : containers) {
    List<PTOperator> operators = container.getOperators();
    for (PTOperator operator : operators) {
      totalOperators++;
      if (operator.isUnifier()) {
        unifierCount++;
      }
    }
  }
  Assert.assertEquals("Number of operators", 8, totalOperators);
  Assert.assertEquals("Number of unifiers", 2, unifierCount);
}
 
Example 8
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testParallelPartitionForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.setInputPortAttribute(o2.inport1, PortContext.PARTITION_PARALLEL, true);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 7, plan.getContainers().size());
}
 
Example 9
Source File: AtLeastOnceTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testInlineOperatorsRecovery() throws Exception
{
  RecoverableInputOperator.initGenTuples();
  CollectorOperator.collection.clear();
  int maxTuples = 30;
  LogicalPlan dag = new LogicalPlan();
  String workingDir = new File("target/testOperatorRecovery").getAbsolutePath();
  AsyncFSStorageAgent asyncFSStorageAgent = new AsyncFSStorageAgent(workingDir, null);
  asyncFSStorageAgent.setSyncCheckpoint(true);
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, asyncFSStorageAgent);
  //dag.getAttributes().get(DAG.HEARTBEAT_INTERVAL_MILLIS, 400);
  dag.getAttributes().put(LogicalPlan.CHECKPOINT_WINDOW_COUNT, 2);
  dag.getAttributes().put(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS, 300);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 1);
  RecoverableInputOperator rip = dag.addOperator("LongGenerator", RecoverableInputOperator.class);
  rip.setMaximumTuples(maxTuples);
  rip.setSimulateFailure(true);

  CollectorOperator cm = dag.addOperator("LongCollector", CollectorOperator.class);
  cm.setSimulateFailure(true);
  dag.addStream("connection", rip.output, cm.input).setLocality(Locality.CONTAINER_LOCAL);

  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.run();

  //for (Long l : collection) {
  //  logger.debug(Codec.getStringWindowId(l));
  //}
  Assert.assertEquals("Generated Outputs", maxTuples, CollectorOperator.collection.size());
}
 
Example 10
Source File: StramMiniClusterTest.java    From attic-apex-core with Apache License 2.0
private LogicalPlan createDAG(LogicalPlanConfiguration lpc) throws Exception
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, testMeta.toURI().toString());
  lpc.prepareDAG(dag, null, "testApp");
  dag.validate();
  Assert.assertEquals("", Integer.valueOf(128), dag.getValue(DAG.MASTER_MEMORY_MB));
  Assert.assertEquals("", "-Dlog4j.properties=custom_log4j.properties", dag.getValue(DAG.CONTAINER_JVM_OPTIONS));
  return dag;
}
 
Example 11
Source File: AtLeastOnceTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testOperatorRecovery() throws Exception
{
  RecoverableInputOperator.initGenTuples();
  CollectorOperator.collection.clear();
  int maxTuples = 30;
  LogicalPlan dag = new LogicalPlan();
  String workingDir = new File("target/testOperatorRecovery").getAbsolutePath();
  AsyncFSStorageAgent asyncFSStorageAgent = new AsyncFSStorageAgent(workingDir, null);
  asyncFSStorageAgent.setSyncCheckpoint(true);
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, asyncFSStorageAgent);
  dag.getAttributes().put(LogicalPlan.CHECKPOINT_WINDOW_COUNT, 2);
  dag.getAttributes().put(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS, 300);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 1);
  RecoverableInputOperator rip = dag.addOperator("LongGenerator", RecoverableInputOperator.class);
  rip.setMaximumTuples(maxTuples);
  rip.setSimulateFailure(true);

  CollectorOperator cm = dag.addOperator("LongCollector", CollectorOperator.class);
  cm.setSimulateFailure(true);
  dag.addStream("connection", rip.output, cm.input);

  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.run();

  Assert.assertEquals("Generated Outputs", maxTuples, CollectorOperator.collection.size());
}
 
Example 12
Source File: OiOStreamTest.java    From attic-apex-core with Apache License 2.0
@Test
public void validatePositiveOiOiOdiamondWithCores()
{
  logger.info("Checking the logic for sanity checking of OiO");

  LogicalPlan plan = new LogicalPlan();
  ThreadIdValidatingInputOperator inputOperator = plan.addOperator("inputOperator", new ThreadIdValidatingInputOperator());
  ThreadIdValidatingGenericIntermediateOperator intermediateOperator1 = plan.addOperator("intermediateOperator1", new ThreadIdValidatingGenericIntermediateOperator());
  ThreadIdValidatingGenericIntermediateOperator intermediateOperator2 = plan.addOperator("intermediateOperator2", new ThreadIdValidatingGenericIntermediateOperator());
  ThreadIdValidatingGenericIntermediateOperator intermediateOperator3 = plan.addOperator("intermediateOperator3", new ThreadIdValidatingGenericIntermediateOperator());
  ThreadIdValidatingGenericIntermediateOperator intermediateOperator4 = plan.addOperator("intermediateOperator4", new ThreadIdValidatingGenericIntermediateOperator());
  ThreadIdValidatingGenericOperatorWithTwoInputPorts outputOperator = plan.addOperator("outputOperator", new ThreadIdValidatingGenericOperatorWithTwoInputPorts());

  plan.addStream("OiOin", inputOperator.output, intermediateOperator1.input, intermediateOperator3.input).setLocality(Locality.THREAD_LOCAL);
  plan.addStream("OiOIntermediate1", intermediateOperator1.output, intermediateOperator2.input).setLocality(Locality.THREAD_LOCAL);
  plan.addStream("OiOIntermediate2", intermediateOperator3.output, intermediateOperator4.input).setLocality(Locality.THREAD_LOCAL);
  plan.addStream("OiOout1", intermediateOperator2.output, outputOperator.input).setLocality(Locality.THREAD_LOCAL);
  plan.addStream("OiOout2", intermediateOperator4.output, outputOperator.input2).setLocality(Locality.THREAD_LOCAL);

  plan.setOperatorAttribute(inputOperator, OperatorContext.VCORES, 1);
  plan.setOperatorAttribute(intermediateOperator1, OperatorContext.VCORES, 1);
  plan.setOperatorAttribute(intermediateOperator2, OperatorContext.VCORES, 2);
  plan.setOperatorAttribute(intermediateOperator3, OperatorContext.VCORES, 3);
  plan.setOperatorAttribute(intermediateOperator4, OperatorContext.VCORES, 5);
  plan.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  try {
    plan.validate();
    Assert.assertTrue("OiOiO extended diamond validation", true);
  } catch (ConstraintViolationException ex) {
    Assert.fail("OIOIO extended diamond validation");
  }
  PhysicalPlan physicalPlan = new PhysicalPlan(plan, new TestPlanContext());
  Assert.assertTrue("number of containers", 1 == physicalPlan.getContainers().size());
  Assert.assertTrue("number of vcores " + physicalPlan.getContainers().get(0).getRequiredVCores(), 5 == physicalPlan.getContainers().get(0).getRequiredVCores());
}
 
Example 13
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testContainersForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.getOperatorMeta("o1").getMeta(o1.outport1).getUnifierMeta().getAttributes().put(OperatorContext.MEMORY_MB, 2000);
  dag.getOperatorMeta("o1").getMeta(o1.outport2).getUnifierMeta().getAttributes().put(OperatorContext.MEMORY_MB, 4000);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o1.outport2", o1.outport2, o2.inport2);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 5, plan.getContainers().size());
  boolean sawOutput1Slider = false;
  boolean sawOutput2Slider = false;
  for (PTContainer container : plan.getContainers()) {
    Assert.assertEquals("number of operators in each container is 1", container.operators.size(), 1);
    if (container.operators.get(0).isUnifier()) {
      String name = container.operators.get(0).getName();
      if (name.equals("o1.outport1#slider")) {
        sawOutput1Slider = true;
        Assert.assertEquals("container memory is 2512", container.getRequiredMemoryMB(), 2512);
      } else if (name.equals("o1.outport2#slider")) {
        sawOutput2Slider = true;
        Assert.assertEquals("container memory is 2512", container.getRequiredMemoryMB(), 4512);
      }
    }
  }
  Assert.assertEquals("Found output1 slider", true, sawOutput1Slider);
  Assert.assertEquals("Found output2 slider", true, sawOutput2Slider);
}
 
Example 14
Source File: StramMiniClusterTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testOperatorFailureRecovery() throws Exception
{

  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, testMeta.toURI().toString());
  FailingOperator badOperator = dag.addOperator("badOperator", FailingOperator.class);
  dag.getContextAttributes(badOperator).put(OperatorContext.RECOVERY_ATTEMPTS, 1);

  LOG.info("Initializing Client");
  StramClient client = new StramClient(conf, dag);
  try {
    client.start();
    client.startApplication();
    client.setClientTimeout(120000);

    boolean result = client.monitorApplication();

    LOG.info("Client run completed. Result=" + result);
    Assert.assertFalse("should fail", result);

    ApplicationReport ar = client.getApplicationReport();
    Assert.assertEquals("should fail", FinalApplicationStatus.FAILED, ar.getFinalApplicationStatus());
    // unable to get the diagnostics message set by the AM here - see YARN-208
    // diagnostics message does not make it here even with Hadoop 2.2 (but works on standalone cluster)
    //Assert.assertTrue("appReport " + ar, ar.getDiagnostics().contains("badOperator"));
  } finally {
    client.stop();
  }
}
 
Example 15
Source File: HostLocalTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testPartitionLocality()
{
  int partitionCount = 3;
  LogicalPlan dag = new LogicalPlan();
  dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, new File("target", HostLocalTest.class.getName()).getAbsolutePath());
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);

  GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
  LocalityPartitioner partitioner = new LocalityPartitioner();
  partitioner.setPartitionCount(partitionCount);
  dag.getMeta(partitioned).getAttributes().put(OperatorContext.PARTITIONER, partitioner);
  dag.addStream("o1_outport1", o1.outport1, partitioned.inport1);

  StreamingContainerManager scm = new StreamingContainerManager(dag);

  ResourceRequestHandler rr = new ResourceRequestHandler();

  int containerMem = 1000;
  Map<String, NodeReport> nodeReports = Maps.newHashMap();
  for (int i = 0; i < partitionCount; i++) {
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host" + (i + 1), 0),
        NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
  }

  // set resources
  rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
  Set<String> expectedHosts = Sets.newHashSet();
  for (int i = 0; i < partitionCount; i++) {
    expectedHosts.add("host" + (i + 1));
  }
  for (ContainerStartRequest csr : scm.containerStartRequests) {
    String host = rr.getHost(csr, true);
    if (host != null) {
      expectedHosts.remove(host);
    }
  }
  Assert.assertTrue("All the allocated hosts removed", expectedHosts.isEmpty());

}
 
Example 16
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
/**
 * Test partitioning of an input operator (no input port).
 * Cover aspects that are not part of generic operator test.
 * Test scaling from one to multiple partitions with unifier when one partition remains unmodified.
 */
@Test
public void testInputOperatorPartitioning()
{
  LogicalPlan dag = new LogicalPlan();
  final TestInputOperator<Object> o1 = dag.addOperator("o1", new TestInputOperator<>());
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("o1.outport1", o1.output, o2.inport1);

  OperatorMeta o1Meta = dag.getMeta(o1);
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{new PartitioningTest.PartitionLoadWatch()}));
  TestPartitioner<TestInputOperator<Object>> partitioner = new TestPartitioner<>();
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, partitioner);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 2, plan.getContainers().size());

  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Partitions, 1, o1Partitions.size());
  PTOperator o1p1 = o1Partitions.get(0);

  // verify load update generates expected events per configuration
  Assert.assertEquals("stats handlers " + o1p1, 1, o1p1.statsListeners.size());
  StatsListener l = o1p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + o1p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);

  PartitioningTest.PartitionLoadWatch.put(o1p1, 1);
  plan.onStatusUpdate(o1p1);
  Assert.assertEquals("scale up triggered", 1, ctx.events.size());
  // add another partition, keep existing as is
  partitioner.extraPartitions.add(new DefaultPartition<>(o1));
  Runnable r = ctx.events.remove(0);
  r.run();
  partitioner.extraPartitions.clear();

  o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("operators after scale up", 2, o1Partitions.size());
  Assert.assertEquals("first partition unmodified", o1p1, o1Partitions.get(0));
  Assert.assertEquals("single output", 1, o1p1.getOutputs().size());
  Assert.assertEquals("output to unifier", 1, o1p1.getOutputs().get(0).sinks.size());

  Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(o2)));
  Set<PTOperator> expDeploy = Sets.newHashSet(o1Partitions.get(1));
  expDeploy.addAll(plan.getMergeOperators(dag.getMeta(o1)));
  expDeploy.addAll(expUndeploy);
  expDeploy.add(o1p1.getOutputs().get(0).sinks.get(0).target);

  Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
  Assert.assertEquals("deploy", expDeploy, ctx.deploy);

  for (PTOperator p : o1Partitions) {
    Assert.assertEquals("activation window id " + p, Checkpoint.INITIAL_CHECKPOINT, p.recoveryCheckpoint);
    Assert.assertEquals("checkpoints " + p + " " + p.checkpoints, Lists.newArrayList(), p.checkpoints);
    PartitioningTest.PartitionLoadWatch.put(p, -1);
    plan.onStatusUpdate(p);
  }
  ctx.events.remove(0).run();
  Assert.assertEquals("operators after scale down", 1, plan.getOperators(o1Meta).size());
}
 
Example 17
Source File: StatsTest.java    From attic-apex-core with Apache License 2.0
@SuppressWarnings("SleepWhileInLoop")
private void baseTestForQueueSize(int maxTuples, TestCollectorStatsListener statsListener, DAG.Locality locality) throws Exception
{
  LogicalPlan dag = new LogicalPlan();
  String workingDir = new File("target/baseTestForQueueSize").getAbsolutePath();
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(workingDir, null));
  dag.getAttributes().put(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS, 200);
  TestOperator testOper = dag.addOperator("TestOperator", TestOperator.class);
  testOper.setMaxTuples(maxTuples);

  TestCollector collector = dag.addOperator("Collector", new TestCollector());
  if (statsListener != null) {
    dag.setOperatorAttribute(collector, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{statsListener}));
  }

  dag.addStream("TestTuples", testOper.outport, collector.inport1).setLocality(locality);

  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.runAsync();
  StreamingContainerManager dnmgr = lc.getStreamingContainerManager();
  Map<Integer, PTOperator> operatorMap = dnmgr.getPhysicalPlan().getAllOperators();
  for (PTOperator p : operatorMap.values()) {
    StramTestSupport.waitForActivation(lc, p);
  }

  long startTms = System.currentTimeMillis();
  if (statsListener != null) {
    while (statsListener.collectorOperatorStats.isEmpty() && (StramTestSupport.DEFAULT_TIMEOUT_MILLIS > System.currentTimeMillis() - startTms)) {
      Thread.sleep(300);
      LOG.debug("Waiting for stats");
    }
  } else {
    while (collector.collectorOperatorStats.isEmpty() && (StramTestSupport.DEFAULT_TIMEOUT_MILLIS > System.currentTimeMillis() - startTms)) {
      Thread.sleep(300);
      LOG.debug("Waiting for stats");
    }
  }
  if (statsListener != null) {
    statsListener.validateStats();
  } else {
    collector.validateStats();
  }
  lc.shutdown();
}
 
Example 18
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
/**
 * Test covering scenario when only new partitions are added during dynamic partitioning and there
 * are no changes to existing partitions and partition mapping
 */
@Test
public void testAugmentedDynamicPartitioning()
{
  LogicalPlan dag = new LogicalPlan();

  TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new TestAugmentingPartitioner<TestGeneratorInputOperator>(3));
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)new PartitioningTest.PartitionLoadWatch()));
  OperatorMeta o1Meta = dag.getMeta(o1);

  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  OperatorMeta o2Meta = dag.getMeta(o2);

  dag.addStream("o1.outport1", o1.outport, o2.inport1);

  int maxContainers = 10;
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);

  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 4, plan.getContainers().size());

  List<PTOperator> o1ops = plan.getOperators(o1Meta);
  Assert.assertEquals("number of o1 operators", 3, o1ops.size());

  List<PTOperator> o2ops = plan.getOperators(o2Meta);
  Assert.assertEquals("number of o2 operators", 1, o2ops.size());
  Set<PTOperator> expUndeploy = Sets.newLinkedHashSet();
  expUndeploy.addAll(plan.getOperators(o2Meta));
  expUndeploy.add(plan.getOperators(o2Meta).get(0).upstreamMerge.values().iterator().next());

  for (int i = 0; i < 2; ++i) {
    PartitioningTest.PartitionLoadWatch.put(o1ops.get(i), 1);
    plan.onStatusUpdate(o1ops.get(i));
  }

  ctx.backupRequests = 0;
  ctx.events.remove(0).run();

  Assert.assertEquals("number of containers", 6, plan.getContainers().size());

  Assert.assertEquals("undeployed opertors", expUndeploy, ctx.undeploy);
}
 
Example 19
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testSingleFinalUnifierInputOverride()
{
  LogicalPlan dag = new LogicalPlan();

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  OperatorMeta o1Meta = dag.getMeta(o1);

  GenericTestOperator o2 =  dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  dag.setInputPortAttribute(o2.inport1, PortContext.UNIFIER_SINGLE_FINAL, true);
  OperatorMeta o2Meta = dag.getMeta(o2);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);

  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);

  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 6, plan.getContainers().size());

  Assert.assertEquals("o1 merge unifiers", 1, plan.getMergeOperators(o1Meta).size());

  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, false);
  ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 6, plan.getContainers().size());

  Assert.assertEquals("o1 merge unifiers", 1, plan.getMergeOperators(o1Meta).size());

  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, true);
  dag.setInputPortAttribute(o2.inport1, PortContext.UNIFIER_SINGLE_FINAL, false);
  ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 5, plan.getContainers().size());

  Set<String> expectedNames = Sets.newHashSet(o1Meta.getMeta(o1.outport1).getUnifierMeta().getName(), o2Meta.getName());
  for (int i = 3; i < 5; ++i) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("o2 container size", 2, container.getOperators().size());

    Set<String> names = Sets.newHashSet();
    for (PTOperator operator : container.getOperators()) {
      names.add(operator.getOperatorMeta().getName());
    }
    Assert.assertEquals("o2 container operators", expectedNames, names);
  }
}
 
Example 20
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testStaticPartitioning()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  TestGeneratorInputOperator node0 = dag.addOperator("node0", TestGeneratorInputOperator.class);
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  PartitioningTestOperator partitioned = dag.addOperator("partitioned", PartitioningTestOperator.class);
  partitioned.setPartitionCount(partitioned.partitionKeys.length);
  GenericTestOperator singleton1 = dag.addOperator("singleton1", GenericTestOperator.class);
  GenericTestOperator singleton2 = dag.addOperator("singleton2", GenericTestOperator.class);

  dag.addStream("n0.inport1", node0.outport, node1.inport1);
  dag.addStream("n1.outport1", node1.outport1, partitioned.inport1, partitioned.inportWithCodec);
  dag.addStream("mergeStream", partitioned.outport1, singleton1.inport1, singleton2.inport1);

  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 2);

  OperatorMeta partitionedMeta = dag.getMeta(partitioned);

  dag.validate();

  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());

  Assert.assertEquals("number of containers", 2, plan.getContainers().size());
  Assert.assertNotNull("partition map", partitioned.partitions);
  Assert.assertEquals("partition map " + partitioned.partitions, 3, partitioned.partitions.size());

  List<PTOperator> n2Instances = plan.getOperators(partitionedMeta);
  Assert.assertEquals("partition instances " + n2Instances, partitioned.partitionKeys.length, n2Instances.size());
  for (int i = 0; i < n2Instances.size(); i++) {
    PTOperator po = n2Instances.get(i);
    Map<String, PTInput> inputsMap = new HashMap<>();
    for (PTInput input: po.getInputs()) {
      inputsMap.put(input.portName, input);
      Assert.assertEquals("partitions " + input, Sets.newHashSet(partitioned.partitionKeys[i]), input.partitions.partitions);
      //Assert.assertEquals("codec " + input.logicalStream, PartitioningTestStreamCodec.class, input.logicalStream.getCodecClass());
    }
    Assert.assertEquals("number inputs " + inputsMap, Sets.newHashSet(PartitioningTestOperator.IPORT1, PartitioningTestOperator.INPORT_WITH_CODEC), inputsMap.keySet());
  }

  Collection<PTOperator> unifiers = plan.getMergeOperators(partitionedMeta);
  Assert.assertEquals("number unifiers " + partitionedMeta, 0, unifiers.size());
}