org.apache.flink.shaded.guava18.com.google.common.collect.Sets Java Examples

The following examples show how to use org.apache.flink.shaded.guava18.com.google.common.collect.Sets, which is Guava's Sets utility class relocated into Flink's shaded namespace so that Flink's bundled Guava cannot clash with a user-provided version. Each example is taken from the project and source file named in its header.
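Before the project-specific examples, here is a minimal, self-contained sketch (hypothetical values; it assumes only that the flink-shaded-guava artifact is on the classpath) exercising the two calls that dominate the examples below, Sets.newHashSet and Sets.difference:

import java.util.Set;

import org.apache.flink.shaded.guava18.com.google.common.collect.Sets;

public class SetsQuickStart {

    public static void main(String[] args) {
        // Build mutable HashSets from varargs.
        Set<String> active = Sets.newHashSet("slot-a", "slot-b", "slot-c");
        Set<String> reported = Sets.newHashSet("slot-b", "slot-c");

        // Elements of 'active' that are absent from 'reported', returned as an
        // unmodifiable live view rather than a copy.
        Sets.SetView<String> stale = Sets.difference(active, reported);

        System.out.println(stale); // [slot-a]
    }
}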
Example #1
Source File: TaskExecutor.java    From Flink-CEPplus with Apache License 2.0
private void freeNoLongerUsedSlots(AllocatedSlotReport allocatedSlotReport) {
	final Iterator<AllocationID> slotsTaskManagerSide = taskSlotTable.getActiveSlots(allocatedSlotReport.getJobId());
	final Set<AllocationID> activeSlots = Sets.newHashSet(slotsTaskManagerSide);
	final Set<AllocationID> reportedSlots = allocatedSlotReport.getAllocatedSlotInfos().stream()
			.map(AllocatedSlotInfo::getAllocationId).collect(Collectors.toSet());

	final Sets.SetView<AllocationID> difference = Sets.difference(activeSlots, reportedSlots);

	for (AllocationID allocationID : difference) {
		freeSlotInternal(
			allocationID,
			new FlinkException(
				String.format("%s is no longer allocated by job %s.", allocationID, allocatedSlotReport.getJobId())));
	}
}
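Note that Sets.difference copies nothing: it returns an unmodifiable view that is evaluated lazily against the backing sets. That is safe here because activeSlots is itself a fresh copy drained from the iterator, so freeing slots inside the loop cannot disturb the view. A short sketch of the view semantics (hypothetical values, standard Guava behavior):

import java.util.Set;

import org.apache.flink.shaded.guava18.com.google.common.collect.Sets;

public class DifferenceViewDemo {

    public static void main(String[] args) {
        Set<Integer> left = Sets.newHashSet(1, 2, 3);
        Set<Integer> right = Sets.newHashSet(3);

        Sets.SetView<Integer> diff = Sets.difference(left, right);
        System.out.println(diff); // [1, 2] (iteration order not guaranteed)

        // The view reflects later changes to the backing sets.
        right.add(2);
        System.out.println(diff); // [1]

        // Snapshot the view if the inputs will be mutated during iteration.
        Set<Integer> frozen = diff.immutableCopy();
    }
}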
 
Example #2
Source File: TaskSlotTableImplTest.java    From flink with Apache License 2.0
/**
 * Tests that one can mark allocated slots as active.
 */
@Test
public void testTryMarkSlotActive() throws Exception {
	final TaskSlotTableImpl<?> taskSlotTable = createTaskSlotTableAndStart(3);

	try {
		final JobID jobId1 = new JobID();
		final AllocationID allocationId1 = new AllocationID();
		taskSlotTable.allocateSlot(0, jobId1, allocationId1, SLOT_TIMEOUT);
		final AllocationID allocationId2 = new AllocationID();
		taskSlotTable.allocateSlot(1, jobId1, allocationId2, SLOT_TIMEOUT);
		final AllocationID allocationId3 = new AllocationID();
		final JobID jobId2 = new JobID();
		taskSlotTable.allocateSlot(2, jobId2, allocationId3, SLOT_TIMEOUT);

		taskSlotTable.markSlotActive(allocationId1);

		assertThat(taskSlotTable.isAllocated(0, jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.isAllocated(1, jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.isAllocated(2, jobId2, allocationId3), is(true));

		assertThat(IteratorUtils.toList(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(Arrays.asList(allocationId1))));

		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId3), is(false));

		assertThat(Sets.newHashSet(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(new HashSet<>(Arrays.asList(allocationId2, allocationId1)))));
	} finally {
		taskSlotTable.close();
		assertThat(taskSlotTable.isClosed(), is(true));
	}
}
 
Example #3
Source File: TaskExecutor.java    From flink with Apache License 2.0
private void freeNoLongerUsedSlots(AllocatedSlotReport allocatedSlotReport) {
	final Iterator<AllocationID> slotsTaskManagerSide = taskSlotTable.getActiveSlots(allocatedSlotReport.getJobId());
	final Set<AllocationID> activeSlots = Sets.newHashSet(slotsTaskManagerSide);
	final Set<AllocationID> reportedSlots = allocatedSlotReport.getAllocatedSlotInfos().stream()
			.map(AllocatedSlotInfo::getAllocationId).collect(Collectors.toSet());

	final Sets.SetView<AllocationID> difference = Sets.difference(activeSlots, reportedSlots);

	for (AllocationID allocationID : difference) {
		freeSlotInternal(
			allocationID,
			new FlinkException(
				String.format("%s is no longer allocated by job %s.", allocationID, allocatedSlotReport.getJobId())));
	}
}
 
Example #4
Source File: ConfigOptionTest.java    From flink with Apache License 2.0
@Test
public void testDeprecationForDeprecatedKeys() {
	String[] deprecatedKeys = new String[] { "deprecated1", "deprecated2" };
	final Set<String> expectedDeprecatedKeys = new HashSet<>(Arrays.asList(deprecatedKeys));

	final ConfigOption<Integer> optionWithDeprecatedKeys = ConfigOptions
		.key("key")
		.defaultValue(0)
		.withDeprecatedKeys(deprecatedKeys)
		.withFallbackKeys("fallback1");

	assertTrue(optionWithDeprecatedKeys.hasDeprecatedKeys());
	assertEquals(expectedDeprecatedKeys, Sets.newHashSet(optionWithDeprecatedKeys.deprecatedKeys()));
}
 
Example #5
Source File: TaskSlotTableTest.java    From flink with Apache License 2.0
/**
 * Tests that one can mark allocated slots as active.
 */
@Test
public void testTryMarkSlotActive() throws SlotNotFoundException {
	final TaskSlotTable taskSlotTable = createTaskSlotTable(Collections.nCopies(3, ResourceProfile.UNKNOWN));

	try {
		taskSlotTable.start(new TestingSlotActionsBuilder().build());

		final JobID jobId1 = new JobID();
		final AllocationID allocationId1 = new AllocationID();
		taskSlotTable.allocateSlot(0, jobId1, allocationId1, SLOT_TIMEOUT);
		final AllocationID allocationId2 = new AllocationID();
		taskSlotTable.allocateSlot(1, jobId1, allocationId2, SLOT_TIMEOUT);
		final AllocationID allocationId3 = new AllocationID();
		final JobID jobId2 = new JobID();
		taskSlotTable.allocateSlot(2, jobId2, allocationId3, SLOT_TIMEOUT);

		taskSlotTable.markSlotActive(allocationId1);

		assertThat(taskSlotTable.isAllocated(0, jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.isAllocated(1, jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.isAllocated(2, jobId2, allocationId3), is(true));

		assertThat(IteratorUtils.toList(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(Arrays.asList(allocationId1))));

		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId3), is(false));

		assertThat(Sets.newHashSet(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(new HashSet<>(Arrays.asList(allocationId2, allocationId1)))));
	} finally {
		taskSlotTable.stop();
	}
}
 
Example #6
Source File: UnionInputGate.java    From flink with Apache License 2.0
public UnionInputGate(InputGate... inputGates) {
	this.inputGates = checkNotNull(inputGates);
	checkArgument(inputGates.length > 1, "Union input gate should union at least two input gates.");

	this.inputGateToIndexOffsetMap = Maps.newHashMapWithExpectedSize(inputGates.length);
	this.inputGatesWithRemainingData = Sets.newHashSetWithExpectedSize(inputGates.length);

	int currentNumberOfInputChannels = 0;

	synchronized (inputGatesWithData) {
		for (InputGate inputGate : inputGates) {
			if (inputGate instanceof UnionInputGate) {
				// if we want to add support for this, we need to implement pollNext()
				throw new UnsupportedOperationException("Cannot union a union of input gates.");
			}

			// The offset to use for buffer or event instances received from this input gate.
			inputGateToIndexOffsetMap.put(checkNotNull(inputGate), currentNumberOfInputChannels);
			inputGatesWithRemainingData.add(inputGate);

			currentNumberOfInputChannels += inputGate.getNumberOfInputChannels();

			CompletableFuture<?> available = inputGate.isAvailable();

			if (available.isDone()) {
				inputGatesWithData.add(inputGate);
			} else {
				available.thenRun(() -> queueInputGate(inputGate));
			}
		}

		if (!inputGatesWithData.isEmpty()) {
			isAvailable = AVAILABLE;
		}
	}

	this.totalNumberOfInputChannels = currentNumberOfInputChannels;
}
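Sets.newHashSetWithExpectedSize(n) (and the Maps counterpart used above) sizes the backing table so that n insertions never trigger a rehash; the JDK's new HashSet<>(n) does not give that guarantee, because its argument is a bucket capacity that, at the default 0.75 load factor, overflows after roughly 0.75 * n elements. A small sketch of the difference, with hypothetical gate names:

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.shaded.guava18.com.google.common.collect.Sets;

public class PresizedSetDemo {

    public static void main(String[] args) {
        int numberOfGates = 100;

        // Capacity chosen so that 100 adds complete without a rehash.
        Set<String> presized = Sets.newHashSetWithExpectedSize(numberOfGates);

        // 100 buckets at load factor 0.75: a rehash happens after 75 adds.
        Set<String> jdkSized = new HashSet<>(numberOfGates);

        for (int i = 0; i < numberOfGates; i++) {
            presized.add("gate-" + i);
            jdkSized.add("gate-" + i);
        }
    }
}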
 
Example #7
Source File: FlinkPulsarITest.java    From pulsar-flink with Apache License 2.0
@Test
public void testStartFromExternalSubscription() throws Exception {
    String topic = newTopic();
    List<MessageId> mids = sendTypedMessages(topic, SchemaType.INT32, Arrays.asList(
            //  0,   1,   2, 3, 4, 5,  6,  7,  8
            -20, -21, -22, 1, 2, 3, 10, 11, 12), Optional.empty());

    String subName = "sub-1";

    PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build();

    admin.topics().createSubscription(TopicName.get(topic).toString(), subName, mids.get(3));

    Map<String, Set<Integer>> expectedData = new HashMap<>();
    expectedData.put(topic, new HashSet<>(Arrays.asList(2, 3, 10, 11, 12)));

    StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
    see.getConfig().disableSysoutLogging();
    see.setParallelism(1);

    Properties sourceProps = sourceProperties();
    sourceProps.setProperty(TOPIC_SINGLE_OPTION_KEY, topic);
    DataStream stream = see.addSource(
            new FlinkPulsarRowSource(serviceUrl, adminUrl, sourceProps).setStartFromSubscription(subName));
    stream.flatMap(new CheckAllMessageExist(expectedData, 5)).setParallelism(1);

    TestUtils.tryExecute(see, "start from specific");

    assertTrue(Sets.newHashSet(admin.topics().getSubscriptions(topic)).contains(subName));

    admin.close();
}
 
Example #8
Source File: DiscovererTest.java    From pulsar-flink with Apache License 2.0
@Test
public void testPartitionEqualConsumerNumber() {
    try {
        Set<String> mockAllTopics = Sets.newHashSet(
                topicName(TEST_TOPIC, 0),
                topicName(TEST_TOPIC, 1),
                topicName(TEST_TOPIC, 2),
                topicName(TEST_TOPIC, 3));

        int numSubTasks = mockAllTopics.size();

        for (int i = 0; i < numSubTasks; i++) {
            TestMetadataReader discoverer = new TestMetadataReader(
                    params, i, numSubTasks,
                    TestMetadataReader.createMockGetAllTopicsSequenceFromFixedReturn(mockAllTopics));

            Set<String> initials = discoverer.discoverTopicChanges();
            assertEquals(1, initials.size());
            assertTrue(mockAllTopics.containsAll(initials));
            assertEquals(i,
                    TestMetadataReader.getExpectedSubtaskIndex(initials.iterator().next(), numSubTasks));

            Set<String> second = discoverer.discoverTopicChanges();
            Set<String> third = discoverer.discoverTopicChanges();
            assertEquals(second.size(), 0);
            assertEquals(third.size(), 0);
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
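Sets.newHashSet is overloaded for varargs (as in the test above), Iterable, and Iterator; the TaskExecutor examples rely on the Iterator overload to drain getActiveSlots(...) into a mutable set. A compact sketch of the three overloads, with hypothetical topic names:

import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;

import org.apache.flink.shaded.guava18.com.google.common.collect.Sets;

public class NewHashSetOverloadsDemo {

    public static void main(String[] args) {
        // Varargs.
        Set<String> fromVarargs = Sets.newHashSet("topic-0", "topic-1");

        // Iterable.
        Set<String> fromIterable = Sets.newHashSet(Arrays.asList("topic-0", "topic-1"));

        // Iterator: drained into a new mutable HashSet.
        Iterator<String> it = Arrays.asList("topic-0", "topic-1").iterator();
        Set<String> fromIterator = Sets.newHashSet(it);

        System.out.println(fromVarargs.equals(fromIterable) && fromIterable.equals(fromIterator)); // true
    }
}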
 
Example #9
Source File: TaskSlotTableTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that one can mark allocated slots as active.
 */
@Test
public void testTryMarkSlotActive() throws SlotNotFoundException {
	final TaskSlotTable taskSlotTable = createTaskSlotTable(Collections.nCopies(3, ResourceProfile.UNKNOWN));

	try {
		taskSlotTable.start(new TestingSlotActionsBuilder().build());

		final JobID jobId1 = new JobID();
		final AllocationID allocationId1 = new AllocationID();
		taskSlotTable.allocateSlot(0, jobId1, allocationId1, SLOT_TIMEOUT);
		final AllocationID allocationId2 = new AllocationID();
		taskSlotTable.allocateSlot(1, jobId1, allocationId2, SLOT_TIMEOUT);
		final AllocationID allocationId3 = new AllocationID();
		final JobID jobId2 = new JobID();
		taskSlotTable.allocateSlot(2, jobId2, allocationId3, SLOT_TIMEOUT);

		taskSlotTable.markSlotActive(allocationId1);

		assertThat(taskSlotTable.isAllocated(0, jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.isAllocated(1, jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.isAllocated(2, jobId2, allocationId3), is(true));

		assertThat(IteratorUtils.toList(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(Arrays.asList(allocationId1))));

		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId1), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId2), is(true));
		assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId3), is(false));

		assertThat(Sets.newHashSet(taskSlotTable.getActiveSlots(jobId1)), is(equalTo(new HashSet<>(Arrays.asList(allocationId2, allocationId1)))));
	} finally {
		taskSlotTable.stop();
	}
}
 
Example #10
Source File: UnionInputGate.java    From Flink-CEPplus with Apache License 2.0
public UnionInputGate(InputGate... inputGates) {
	this.inputGates = checkNotNull(inputGates);
	checkArgument(inputGates.length > 1, "Union input gate should union at least two input gates.");

	this.inputGateToIndexOffsetMap = Maps.newHashMapWithExpectedSize(inputGates.length);
	this.inputGatesWithRemainingData = Sets.newHashSetWithExpectedSize(inputGates.length);

	int currentNumberOfInputChannels = 0;

	for (InputGate inputGate : inputGates) {
		if (inputGate instanceof UnionInputGate) {
			// if we want to add support for this, we need to implement pollNextBufferOrEvent()
			throw new UnsupportedOperationException("Cannot union a union of input gates.");
		}

		// The offset to use for buffer or event instances received from this input gate.
		inputGateToIndexOffsetMap.put(checkNotNull(inputGate), currentNumberOfInputChannels);
		inputGatesWithRemainingData.add(inputGate);

		currentNumberOfInputChannels += inputGate.getNumberOfInputChannels();

		// Register the union gate as a listener for all input gates
		inputGate.registerListener(this);
	}

	this.totalNumberOfInputChannels = currentNumberOfInputChannels;
}
 
Example #11
Source File: NFACompilerTest.java    From flink with Apache License 2.0
/**
 * Tests that the NFACompiler generates the correct NFA from a given Pattern.
 */
@Test
public void testNFACompilerWithSimplePattern() {
	Pattern<Event, Event> pattern = Pattern.<Event>begin("start").where(startFilter)
		.followedBy("middle").subtype(SubEvent.class)
		.next("end").where(endFilter);

	NFA<Event> nfa = compile(pattern, false);

	Collection<State<Event>> states = nfa.getStates();
	assertEquals(4, states.size());

	Map<String, State<Event>> stateMap = new HashMap<>();
	for (State<Event> state : states) {
		stateMap.put(state.getName(), state);
	}

	assertTrue(stateMap.containsKey("start"));
	State<Event> startState = stateMap.get("start");
	assertTrue(startState.isStart());
	final Set<Tuple2<String, StateTransitionAction>> startTransitions = unfoldTransitions(startState);
	assertEquals(Sets.newHashSet(
		Tuple2.of("middle", StateTransitionAction.TAKE)
	), startTransitions);

	assertTrue(stateMap.containsKey("middle"));
	State<Event> middleState = stateMap.get("middle");
	final Set<Tuple2<String, StateTransitionAction>> middleTransitions = unfoldTransitions(middleState);
	assertEquals(Sets.newHashSet(
		Tuple2.of("middle", StateTransitionAction.IGNORE),
		Tuple2.of("end", StateTransitionAction.TAKE)
	), middleTransitions);

	assertTrue(stateMap.containsKey("end"));
	State<Event> endState = stateMap.get("end");
	final Set<Tuple2<String, StateTransitionAction>> endTransitions = unfoldTransitions(endState);
	assertEquals(Sets.newHashSet(
		Tuple2.of(NFACompiler.ENDING_STATE_NAME, StateTransitionAction.TAKE)
	), endTransitions);

	assertTrue(stateMap.containsKey(NFACompiler.ENDING_STATE_NAME));
	State<Event> endingState = stateMap.get(NFACompiler.ENDING_STATE_NAME);
	assertTrue(endingState.isFinal());
	assertEquals(0, endingState.getStateTransitions().size());
}
 
Example #12
Source File: FlinkPulsarSource.java    From pulsar-flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {

    this.taskIndex = getRuntimeContext().getIndexOfThisSubtask();
    this.numParallelTasks = getRuntimeContext().getNumberOfParallelSubtasks();

    this.metadataReader = createMetadataReader();

    ownedTopicStarts = new HashMap<>();
    Set<String> allTopics = metadataReader.discoverTopicChanges();

    log.info("Discovered topics : {}", allTopics);

    if (restoredState != null) {
        allTopics.stream()
                .filter(k -> !restoredState.containsKey(k))
                .forEach(t -> restoredState.put(t, MessageId.earliest));

        restoredState.entrySet().stream()
                .filter(e -> SourceSinkUtils.belongsTo(e.getKey(), numParallelTasks, taskIndex))
                .forEach(e -> ownedTopicStarts.put(e.getKey(), e.getValue()));

        Set<String> goneTopics = Sets.difference(restoredState.keySet(), allTopics).stream()
                .filter(k -> SourceSinkUtils.belongsTo(k, numParallelTasks, taskIndex))
                .collect(Collectors.toSet());

        for (String goneTopic : goneTopics) {
            log.warn(goneTopic + " is removed from subscription since " +
                    "it no longer matches with topics settings.");
            ownedTopicStarts.remove(goneTopic);
        }

        log.info("Source {} will start reading {} topics in restored state {}",
                taskIndex, ownedTopicStarts.size(), StringUtils.join(ownedTopicStarts.entrySet()));
    } else {
        if (specificStartupOffsets == null && specificStartupOffsetsAsBytes != null) {
            specificStartupOffsets = new HashMap<>();
            for (Map.Entry<String, byte[]> entry : specificStartupOffsetsAsBytes.entrySet()) {
                specificStartupOffsets.put(entry.getKey(), MessageId.fromByteArray(entry.getValue()));
            }
        }
        Map<String, MessageId> allTopicOffsets =
                offsetForEachTopic(allTopics, startupMode, specificStartupOffsets);

        ownedTopicStarts.putAll(allTopicOffsets.entrySet().stream()
                .filter(e -> SourceSinkUtils.belongsTo(e.getKey(), numParallelTasks, taskIndex))
                .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())));

        if (ownedTopicStarts.isEmpty()) {
            log.info("Source {} initially has no topics to read from.", taskIndex);
        } else {
            log.info("Source {} will start reading {} topics from initialized positions: {}",
                    taskIndex, ownedTopicStarts.size(), ownedTopicStarts);
        }
    }
}
 
Example #13
Source File: TwoInputNode.java    From Flink-CEPplus with Apache License 2.0
protected void instantiate(OperatorDescriptorDual operator, Channel in1, Channel in2,
		List<Set<? extends NamedChannel>> broadcastPlanChannels, List<PlanNode> target, CostEstimator estimator,
		RequestedGlobalProperties globPropsReq1, RequestedGlobalProperties globPropsReq2,
		RequestedLocalProperties locPropsReq1, RequestedLocalProperties locPropsReq2)
{
	final PlanNode inputSource1 = in1.getSource();
	final PlanNode inputSource2 = in2.getSource();
	
	for (List<NamedChannel> broadcastChannelsCombination: Sets.cartesianProduct(broadcastPlanChannels)) {
		
		boolean validCombination = true;
		
		// check whether the broadcast inputs use the same plan candidate at the branching point
		for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
			NamedChannel nc = broadcastChannelsCombination.get(i);
			PlanNode bcSource = nc.getSource();
			
			if (!(areBranchCompatible(bcSource, inputSource1) || areBranchCompatible(bcSource, inputSource2))) {
				validCombination = false;
				break;
			}
			
			// check branch compatibility against all other broadcast variables
			for (int k = 0; k < i; k++) {
				PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
				
				if (!areBranchCompatible(bcSource, otherBcSource)) {
					validCombination = false;
					break;
				}
			}
		}
		
		if (!validCombination) {
			continue;
		}
		
		placePipelineBreakersIfNecessary(operator.getStrategy(), in1, in2);
		
		DualInputPlanNode node = operator.instantiate(in1, in2, this);
		node.setBroadcastInputs(broadcastChannelsCombination);

		SemanticProperties semPropsGlobalPropFiltering = getSemanticPropertiesForGlobalPropertyFiltering();
		GlobalProperties gp1 = in1.getGlobalProperties().clone()
				.filterBySemanticProperties(semPropsGlobalPropFiltering, 0);
		GlobalProperties gp2 = in2.getGlobalProperties().clone()
				.filterBySemanticProperties(semPropsGlobalPropFiltering, 1);
		GlobalProperties combined = operator.computeGlobalProperties(gp1, gp2);

		SemanticProperties semPropsLocalPropFiltering = getSemanticPropertiesForLocalPropertyFiltering();
		LocalProperties lp1 = in1.getLocalProperties().clone()
				.filterBySemanticProperties(semPropsLocalPropFiltering, 0);
		LocalProperties lp2 = in2.getLocalProperties().clone()
				.filterBySemanticProperties(semPropsLocalPropFiltering, 1);
		LocalProperties locals = operator.computeLocalProperties(lp1, lp2);
		
		node.initProperties(combined, locals);
		node.updatePropertiesWithUniqueSets(getUniqueFields());
		target.add(node);
	}
}
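Sets.cartesianProduct(List<Set<B>>) yields every way of picking one element from each input set, as a Set<List<B>> computed lazily on iteration; the optimizer uses it above to enumerate every combination of broadcast-channel plan candidates. A minimal sketch with hypothetical candidate names:

import java.util.Arrays;
import java.util.List;
import java.util.Set;

import org.apache.flink.shaded.guava18.com.google.common.collect.Sets;

public class CartesianProductDemo {

    public static void main(String[] args) {
        Set<String> firstInput = Sets.newHashSet("planA", "planB");
        Set<String> secondInput = Sets.newHashSet("planX");

        // One list per combination: [planA, planX], [planB, planX]
        // (order within the first slot is not guaranteed).
        Set<List<String>> combinations =
                Sets.cartesianProduct(Arrays.asList(firstInput, secondInput));

        for (List<String> combination : combinations) {
            System.out.println(combination);
        }
    }
}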
 
Example #14
Source File: SingleInputNode.java    From Flink-CEPplus with Apache License 2.0
protected void instantiateCandidate(OperatorDescriptorSingle dps, Channel in, List<Set<? extends NamedChannel>> broadcastPlanChannels,
		List<PlanNode> target, CostEstimator estimator, RequestedGlobalProperties globPropsReq, RequestedLocalProperties locPropsReq)
{
	final PlanNode inputSource = in.getSource();
	
	for (List<NamedChannel> broadcastChannelsCombination: Sets.cartesianProduct(broadcastPlanChannels)) {
		
		boolean validCombination = true;
		boolean requiresPipelinebreaker = false;
		
		// check whether the broadcast inputs use the same plan candidate at the branching point
		for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
			NamedChannel nc = broadcastChannelsCombination.get(i);
			PlanNode bcSource = nc.getSource();
			
			// check branch compatibility against input
			if (!areBranchCompatible(bcSource, inputSource)) {
				validCombination = false;
				break;
			}
			
			// check branch compatibility against all other broadcast variables
			for (int k = 0; k < i; k++) {
				PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
				
				if (!areBranchCompatible(bcSource, otherBcSource)) {
					validCombination = false;
					break;
				}
			}
			
			// check if there is a common predecessor and whether there is a dam on the way to all common predecessors
			if (in.isOnDynamicPath() && this.hereJoinedBranches != null) {
				for (OptimizerNode brancher : this.hereJoinedBranches) {
					PlanNode candAtBrancher = in.getSource().getCandidateAtBranchPoint(brancher);
					
					if (candAtBrancher == null) {
						// closed branch between two broadcast variables
						continue;
					}
					
					SourceAndDamReport res = in.getSource().hasDamOnPathDownTo(candAtBrancher);
					if (res == NOT_FOUND) {
						throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
					} else if (res == FOUND_SOURCE) {
						requiresPipelinebreaker = true;
						break;
					} else if (res == FOUND_SOURCE_AND_DAM) {
						// good
					} else {
						throw new CompilerException();
					}
				}
			}
		}
		
		if (!validCombination) {
			continue;
		}
		
		if (requiresPipelinebreaker) {
			in.setTempMode(in.getTempMode().makePipelineBreaker());
		}
		
		final SingleInputPlanNode node = dps.instantiate(in, this);
		node.setBroadcastInputs(broadcastChannelsCombination);
		
		// compute how the strategy affects the properties
		GlobalProperties gProps = in.getGlobalProperties().clone();
		LocalProperties lProps = in.getLocalProperties().clone();
		gProps = dps.computeGlobalProperties(gProps);
		lProps = dps.computeLocalProperties(lProps);

		// filter by the user code field copies
		gProps = gProps.filterBySemanticProperties(getSemanticPropertiesForGlobalPropertyFiltering(), 0);
		lProps = lProps.filterBySemanticProperties(getSemanticPropertiesForLocalPropertyFiltering(), 0);
		
		// apply
		node.initProperties(gProps, lProps);
		node.updatePropertiesWithUniqueSets(getUniqueFields());
		target.add(node);
	}
}
 
Example #15
Source File: SingleInputNode.java    From flink with Apache License 2.0
protected void instantiateCandidate(OperatorDescriptorSingle dps, Channel in, List<Set<? extends NamedChannel>> broadcastPlanChannels,
		List<PlanNode> target, CostEstimator estimator, RequestedGlobalProperties globPropsReq, RequestedLocalProperties locPropsReq)
{
	final PlanNode inputSource = in.getSource();
	
	for (List<NamedChannel> broadcastChannelsCombination: Sets.cartesianProduct(broadcastPlanChannels)) {
		
		boolean validCombination = true;
		boolean requiresPipelinebreaker = false;
		
		// check whether the broadcast inputs use the same plan candidate at the branching point
		for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
			NamedChannel nc = broadcastChannelsCombination.get(i);
			PlanNode bcSource = nc.getSource();
			
			// check branch compatibility against input
			if (!areBranchCompatible(bcSource, inputSource)) {
				validCombination = false;
				break;
			}
			
			// check branch compatibility against all other broadcast variables
			for (int k = 0; k < i; k++) {
				PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
				
				if (!areBranchCompatible(bcSource, otherBcSource)) {
					validCombination = false;
					break;
				}
			}
			
			// check if there is a common predecessor and whether there is a dam on the way to all common predecessors
			if (in.isOnDynamicPath() && this.hereJoinedBranches != null) {
				for (OptimizerNode brancher : this.hereJoinedBranches) {
					PlanNode candAtBrancher = in.getSource().getCandidateAtBranchPoint(brancher);
					
					if (candAtBrancher == null) {
						// closed branch between two broadcast variables
						continue;
					}
					
					SourceAndDamReport res = in.getSource().hasDamOnPathDownTo(candAtBrancher);
					if (res == NOT_FOUND) {
						throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
					} else if (res == FOUND_SOURCE) {
						requiresPipelinebreaker = true;
						break;
					} else if (res == FOUND_SOURCE_AND_DAM) {
						// good
					} else {
						throw new CompilerException();
					}
				}
			}
		}
		
		if (!validCombination) {
			continue;
		}
		
		if (requiresPipelinebreaker) {
			in.setTempMode(in.getTempMode().makePipelineBreaker());
		}
		
		final SingleInputPlanNode node = dps.instantiate(in, this);
		node.setBroadcastInputs(broadcastChannelsCombination);
		
		// compute how the strategy affects the properties
		GlobalProperties gProps = in.getGlobalProperties().clone();
		LocalProperties lProps = in.getLocalProperties().clone();
		gProps = dps.computeGlobalProperties(gProps);
		lProps = dps.computeLocalProperties(lProps);

		// filter by the user code field copies
		gProps = gProps.filterBySemanticProperties(getSemanticPropertiesForGlobalPropertyFiltering(), 0);
		lProps = lProps.filterBySemanticProperties(getSemanticPropertiesForLocalPropertyFiltering(), 0);
		
		// apply
		node.initProperties(gProps, lProps);
		node.updatePropertiesWithUniqueSets(getUniqueFields());
		target.add(node);
	}
}
 
Example #16
Source File: TwoInputNode.java    From flink with Apache License 2.0
protected void instantiate(OperatorDescriptorDual operator, Channel in1, Channel in2,
		List<Set<? extends NamedChannel>> broadcastPlanChannels, List<PlanNode> target, CostEstimator estimator,
		RequestedGlobalProperties globPropsReq1, RequestedGlobalProperties globPropsReq2,
		RequestedLocalProperties locPropsReq1, RequestedLocalProperties locPropsReq2)
{
	final PlanNode inputSource1 = in1.getSource();
	final PlanNode inputSource2 = in2.getSource();
	
	for (List<NamedChannel> broadcastChannelsCombination: Sets.cartesianProduct(broadcastPlanChannels)) {
		
		boolean validCombination = true;
		
		// check whether the broadcast inputs use the same plan candidate at the branching point
		for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
			NamedChannel nc = broadcastChannelsCombination.get(i);
			PlanNode bcSource = nc.getSource();
			
			if (!(areBranchCompatible(bcSource, inputSource1) || areBranchCompatible(bcSource, inputSource2))) {
				validCombination = false;
				break;
			}
			
			// check branch compatibility against all other broadcast variables
			for (int k = 0; k < i; k++) {
				PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
				
				if (!areBranchCompatible(bcSource, otherBcSource)) {
					validCombination = false;
					break;
				}
			}
		}
		
		if (!validCombination) {
			continue;
		}
		
		placePipelineBreakersIfNecessary(operator.getStrategy(), in1, in2);
		
		DualInputPlanNode node = operator.instantiate(in1, in2, this);
		node.setBroadcastInputs(broadcastChannelsCombination);

		SemanticProperties semPropsGlobalPropFiltering = getSemanticPropertiesForGlobalPropertyFiltering();
		GlobalProperties gp1 = in1.getGlobalProperties().clone()
				.filterBySemanticProperties(semPropsGlobalPropFiltering, 0);
		GlobalProperties gp2 = in2.getGlobalProperties().clone()
				.filterBySemanticProperties(semPropsGlobalPropFiltering, 1);
		GlobalProperties combined = operator.computeGlobalProperties(gp1, gp2);

		SemanticProperties semPropsLocalPropFiltering = getSemanticPropertiesForLocalPropertyFiltering();
		LocalProperties lp1 = in1.getLocalProperties().clone()
				.filterBySemanticProperties(semPropsLocalPropFiltering, 0);
		LocalProperties lp2 = in2.getLocalProperties().clone()
				.filterBySemanticProperties(semPropsLocalPropFiltering, 1);
		LocalProperties locals = operator.computeLocalProperties(lp1, lp2);
		
		node.initProperties(combined, locals);
		node.updatePropertiesWithUniqueSets(getUniqueFields());
		target.add(node);
	}
}
 
Example #17
Source File: NFACompilerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the NFACompiler generates the correct NFA from a given Pattern.
 */
@Test
public void testNFACompilerWithSimplePattern() {
	Pattern<Event, Event> pattern = Pattern.<Event>begin("start").where(startFilter)
		.followedBy("middle").subtype(SubEvent.class)
		.next("end").where(endFilter);

	NFA<Event> nfa = compile(pattern, false);

	Collection<State<Event>> states = nfa.getStates();
	assertEquals(4, states.size());

	Map<String, State<Event>> stateMap = new HashMap<>();
	for (State<Event> state : states) {
		stateMap.put(state.getName(), state);
	}

	assertTrue(stateMap.containsKey("start"));
	State<Event> startState = stateMap.get("start");
	assertTrue(startState.isStart());
	final Set<Tuple2<String, StateTransitionAction>> startTransitions = unfoldTransitions(startState);
	assertEquals(Sets.newHashSet(
		Tuple2.of("middle", StateTransitionAction.TAKE)
	), startTransitions);

	assertTrue(stateMap.containsKey("middle"));
	State<Event> middleState = stateMap.get("middle");
	final Set<Tuple2<String, StateTransitionAction>> middleTransitions = unfoldTransitions(middleState);
	assertEquals(Sets.newHashSet(
		Tuple2.of("middle", StateTransitionAction.IGNORE),
		Tuple2.of("end", StateTransitionAction.TAKE)
	), middleTransitions);

	assertTrue(stateMap.containsKey("end"));
	State<Event> endState = stateMap.get("end");
	final Set<Tuple2<String, StateTransitionAction>> endTransitions = unfoldTransitions(endState);
	assertEquals(Sets.newHashSet(
		Tuple2.of(NFACompiler.ENDING_STATE_NAME, StateTransitionAction.TAKE)
	), endTransitions);

	assertTrue(stateMap.containsKey(NFACompiler.ENDING_STATE_NAME));
	State<Event> endingState = stateMap.get(NFACompiler.ENDING_STATE_NAME);
	assertTrue(endingState.isFinal());
	assertEquals(0, endingState.getStateTransitions().size());
}
 
Example #18
Source File: TtlMapStateAllEntriesTestContext.java    From flink with Apache License 2.0
@Override
public boolean isOriginalEmptyValue() throws Exception {
	return Objects.equals(emptyValue, Sets.newHashSet(((Iterable<?>) getOriginal()).iterator()));
}
 
Example #19
Source File: DefaultExecutionTopologyTest.java    From flink with Apache License 2.0
private void assertRegionContainsAllVertices(final DefaultSchedulingPipelinedRegion pipelinedRegionOfVertex) {
	final Set<DefaultExecutionVertex> allVertices = Sets.newHashSet(pipelinedRegionOfVertex.getVertices());
	assertEquals(Sets.newHashSet(adapter.getVertices()), allVertices);
}
 