org.apache.flink.runtime.jobgraph.IntermediateDataSetID Java Examples

The following examples show how to use org.apache.flink.runtime.jobgraph.IntermediateDataSetID. Each snippet is taken from an open-source project; the source file and originating project are noted above each example.
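Before the individual examples, here is a minimal sketch of how an IntermediateDataSetID is typically obtained. Both constructors appear in the examples below; the wrapper class and main method are hypothetical scaffolding added for illustration.

import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.util.AbstractID;

public class IntermediateDataSetIdSketch {
	public static void main(String[] args) {
		// No-arg constructor: generates a fresh, random ID (the common case below).
		IntermediateDataSetID randomId = new IntermediateDataSetID();

		// Wrapping an existing AbstractID: used when the ID must be known up front,
		// e.g. to match a BlockingShuffleOutputFormat (see the JobGraphGenerator example below).
		IntermediateDataSetID fixedId = new IntermediateDataSetID(new AbstractID());

		System.out.println(randomId + " / " + fixedId);
	}
}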
Example #1
Source File: TestingSchedulingTopology.java    From flink with Apache License 2.0
@Override
protected List<TestingSchedulingResultPartition> connect() {
	final List<TestingSchedulingResultPartition> resultPartitions = new ArrayList<>();
	final IntermediateDataSetID intermediateDataSetId = new IntermediateDataSetID();

	for (TestingSchedulingExecutionVertex producer : producers) {

		final TestingSchedulingResultPartition resultPartition = initTestingSchedulingResultPartitionBuilder()
			.withIntermediateDataSetID(intermediateDataSetId)
			.withResultPartitionState(resultPartitionState)
			.build();
		resultPartition.setProducer(producer);
		producer.addProducedPartition(resultPartition);

		for (TestingSchedulingExecutionVertex consumer : consumers) {
			consumer.addConsumedPartition(resultPartition);
			resultPartition.addConsumer(consumer);
		}
		resultPartitions.add(resultPartition);
	}

	return resultPartitions;
}
 
Example #2
Source File: TaskExecutorPartitionTrackerImplTest.java    From flink with Apache License 2.0
@Test
public void testStopTrackingAndReleaseJobPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
	partitionTracker.stopTrackingAndReleaseJobPartitions(Collections.singleton(resultPartitionId1));

	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
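Note that only resultPartitionId1 is passed to stopTrackingAndReleaseJobPartitions, so the release future is expected to contain that partition while resultPartitionId2 remains tracked.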
 
Example #3
Source File: DefaultSchedulingExecutionVertexTest.java    From flink with Apache License 2.0
@Before
public void setUp() throws Exception {

	intermediateResultPartitionId = new IntermediateResultPartitionID();

	DefaultSchedulingResultPartition schedulingResultPartition = new DefaultSchedulingResultPartition(
		intermediateResultPartitionId,
		new IntermediateDataSetID(),
		BLOCKING);
	producerVertex = new DefaultSchedulingExecutionVertex(
		new ExecutionVertexID(new JobVertexID(), 0),
		Collections.singletonList(schedulingResultPartition),
		stateSupplier,
		ANY);
	schedulingResultPartition.setProducer(producerVertex);
	consumerVertex = new DefaultSchedulingExecutionVertex(
		new ExecutionVertexID(new JobVertexID(), 0),
		Collections.emptyList(),
		stateSupplier,
		ANY);
	consumerVertex.addConsumedPartition(schedulingResultPartition);
}
 
Example #4
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		shuffleDescriptor.getResultPartitionID().getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example #5
Source File: PartitionDescriptor.java    From flink with Apache License 2.0
@VisibleForTesting
public PartitionDescriptor(
		IntermediateDataSetID resultId,
		int totalNumberOfPartitions,
		IntermediateResultPartitionID partitionId,
		ResultPartitionType partitionType,
		int numberOfSubpartitions,
		int connectionIndex) {
	this.resultId = checkNotNull(resultId);
	checkArgument(totalNumberOfPartitions >= 1);
	this.totalNumberOfPartitions = totalNumberOfPartitions;
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);
	checkArgument(numberOfSubpartitions >= 1);
	this.numberOfSubpartitions = numberOfSubpartitions;
	this.connectionIndex = connectionIndex;
}
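As a usage sketch of the @VisibleForTesting constructor above (argument values are illustrative; both count arguments must be at least 1 to pass the checkArgument checks):

PartitionDescriptor descriptor = new PartitionDescriptor(
	new IntermediateDataSetID(),          // resultId
	1,                                    // totalNumberOfPartitions (>= 1)
	new IntermediateResultPartitionID(),  // partitionId
	ResultPartitionType.PIPELINED,        // partitionType
	1,                                    // numberOfSubpartitions (>= 1)
	0);                                   // connectionIndex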
 
Example #6
Source File: Task.java    From flink with Apache License 2.0
@Override
public void requestPartitionProducerState(
		final IntermediateDataSetID intermediateDataSetId,
		final ResultPartitionID resultPartitionId,
		Consumer<? super ResponseHandle> responseConsumer) {

	final CompletableFuture<ExecutionState> futurePartitionState =
		partitionProducerStateChecker.requestPartitionProducerState(
			jobId,
			intermediateDataSetId,
			resultPartitionId);

	FutureUtils.assertNoException(
		futurePartitionState
			.handle(PartitionProducerStateResponseHandle::new)
			.thenAcceptAsync(responseConsumer, executor));
}
 
Example #7
Source File: TaskExecutorPartitionTrackerImplTest.java    From flink with Apache License 2.0
@Test
public void testStopTrackingAndReleaseJobPartitionsFor() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final JobID jobId1 = new JobID();
	final JobID jobId2 = new JobID();
	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(jobId1, new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(jobId2, new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
	partitionTracker.stopTrackingAndReleaseJobPartitionsFor(jobId1);

	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example #8
Source File: TaskExecutorPartitionTrackerImplTest.java    From flink with Apache License 2.0
@Test
public void stopTrackingAndReleaseClusterPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final IntermediateDataSetID dataSetId1 = new IntermediateDataSetID();
	final IntermediateDataSetID dataSetId2 = new IntermediateDataSetID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, dataSetId1, 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, dataSetId2, 1));
	partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));

	partitionTracker.stopTrackingAndReleaseClusterPartitions(Collections.singleton(dataSetId1));
	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
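Here resultPartitionId1 is first promoted from a job partition to a cluster partition, after which it is released by its IntermediateDataSetID (dataSetId1) rather than by job ID.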
 
Example #9
Source File: TaskExecutorPartitionTrackerImplTest.java    From flink with Apache License 2.0
@Test
public void stopTrackingAndReleaseAllClusterPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
	partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));

	partitionTracker.stopTrackingAndReleaseAllClusterPartitions();
	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example #10
Source File: TaskTest.java    From flink with Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForPartitions() throws Exception {
	final PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		new IntermediateResultPartitionID(),
		ResultPartitionType.PIPELINED,
		1,
		1);
	final ShuffleDescriptor shuffleDescriptor = NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
	final ResultPartitionDeploymentDescriptor dummyPartition = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		false);
	testExecutionFailsInNetworkRegistration(Collections.singleton(dummyPartition), Collections.emptyList());
}
 
Example #11
Source File: ActorGatewayPartitionProducerStateChecker.java    From Flink-CEPplus with Apache License 2.0
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
		JobID jobId,
		IntermediateDataSetID intermediateDataSetId,
		ResultPartitionID resultPartitionId) {

	JobManagerMessages.RequestPartitionProducerState msg = new JobManagerMessages.RequestPartitionProducerState(
		jobId,
		intermediateDataSetId, resultPartitionId
	);

	scala.concurrent.Future<ExecutionState> futureResponse = jobManager
		.ask(msg, timeout)
		.mapTo(ClassTag$.MODULE$.<ExecutionState>apply(ExecutionState.class));

	return FutureUtils.toJava(futureResponse);
}
 
Example #12
Source File: IntermediateResult.java    From flink with Apache License 2.0
public IntermediateResult(
		IntermediateDataSetID id,
		ExecutionJobVertex producer,
		int numParallelProducers,
		ResultPartitionType resultType) {

	this.id = checkNotNull(id);
	this.producer = checkNotNull(producer);

	checkArgument(numParallelProducers >= 1);
	this.numParallelProducers = numParallelProducers;

	this.partitions = new IntermediateResultPartition[numParallelProducers];

	this.numberOfRunningProducers = new AtomicInteger(numParallelProducers);

	// we do not set the intermediate result partitions here, because we let them be initialized by
	// the execution vertex that produces them

	// assign a random connection index
	this.connectionIndex = (int) (Math.random() * Integer.MAX_VALUE);

	// The runtime type for this produced result
	this.resultType = checkNotNull(resultType);
}
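The connection index is shared by all partitions of this result; randomizing it is presumably meant to spread different intermediate results across distinct network connections rather than multiplexing everything onto one.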
 
Example #13
Source File: ResultPartitionDeploymentDescriptor.java    From Flink-CEPplus with Apache License 2.0
public static ResultPartitionDeploymentDescriptor from(
		IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) {

	final IntermediateDataSetID resultId = partition.getIntermediateResult().getId();
	final IntermediateResultPartitionID partitionId = partition.getPartitionId();
	final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType();

	// The produced data is partitioned among a number of subpartitions.
	//
	// If no consumers are known at this point, we use a single subpartition, otherwise we have
	// one for each consuming sub task.
	int numberOfSubpartitions = 1;

	if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) {

		if (partition.getConsumers().size() > 1) {
			throw new IllegalStateException("Currently, only a single consumer group per partition is supported.");
		}

		numberOfSubpartitions = partition.getConsumers().get(0).size();
	}

	return new ResultPartitionDeploymentDescriptor(
			resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling);
}
 
Example #14
Source File: InputGateFairnessTest.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
public FairnessVerifyingInputGate(
		String owningTaskName,
		JobID jobId,
		IntermediateDataSetID consumedResultId,
		int consumedSubpartitionIndex,
		int numberOfInputChannels,
		TaskActions taskActions,
		TaskIOMetricGroup metrics,
		boolean isCreditBased) {

	super(owningTaskName, jobId, consumedResultId, ResultPartitionType.PIPELINED,
		consumedSubpartitionIndex, numberOfInputChannels, taskActions, metrics, isCreditBased);

	try {
		Field f = SingleInputGate.class.getDeclaredField("inputChannelsWithData");
		f.setAccessible(true);
		channelsWithData = (ArrayDeque<InputChannel>) f.get(this);
	}
	catch (Exception e) {
		throw new RuntimeException(e);
	}

	this.uniquenessChecker = new HashSet<>();
}
 
Example #15
Source File: PartialInputChannelDeploymentDescriptor.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a partial input channel for the given partition and producing task.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition,
		Execution producer) {

	final ResultPartitionID partitionId = new ResultPartitionID(
			partition.getPartitionId(), producer.getAttemptId());

	final IntermediateResult result = partition.getIntermediateResult();

	final IntermediateDataSetID resultId = result.getId();
	final TaskManagerLocation partitionConnectionInfo = producer.getAssignedResourceLocation();
	final int partitionConnectionIndex = result.getConnectionIndex();

	return new PartialInputChannelDeploymentDescriptor(
			resultId, partitionId, partitionConnectionInfo, partitionConnectionIndex);
}
 
Example #16
Source File: JobGraphGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testGeneratingJobGraphWithUnconsumedResultPartition() {

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<Long, Long>> input = env.fromElements(new Tuple2<>(1L, 2L))
		.setParallelism(1);

	DataSet<Tuple2<Long, Long>> ds = input.map(new IdentityMapper<>())
		.setParallelism(3);

	AbstractID intermediateDataSetID = new AbstractID();

	// this output branch will be excluded.
	ds.output(BlockingShuffleOutputFormat.createOutputFormat(intermediateDataSetID))
		.setParallelism(1);

	// this is the normal output branch.
	ds.output(new DiscardingOutputFormat<>())
		.setParallelism(1);

	JobGraph jobGraph = compileJob(env);

	Assert.assertEquals(3, jobGraph.getVerticesSortedTopologicallyFromSources().size());

	JobVertex mapVertex = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
	Assert.assertThat(mapVertex, Matchers.instanceOf(JobVertex.class));

	// there are 2 output results, one of which is ResultPartitionType.BLOCKING_PERSISTENT
	Assert.assertEquals(2, mapVertex.getProducedDataSets().size());

	Assert.assertTrue(mapVertex.getProducedDataSets().stream()
		.anyMatch(dataSet -> dataSet.getId().equals(new IntermediateDataSetID(intermediateDataSetID)) &&
			dataSet.getResultType() == ResultPartitionType.BLOCKING_PERSISTENT));
}
 
Example #17
Source File: JobMaster.java    From flink with Apache License 2.0
@Override
public CompletableFuture<ExecutionState> requestPartitionState(
		final IntermediateDataSetID intermediateResultId,
		final ResultPartitionID resultPartitionId) {

	try {
		return CompletableFuture.completedFuture(schedulerNG.requestPartitionState(intermediateResultId, resultPartitionId));
	} catch (PartitionProducerDisposedException e) {
		log.info("Error while requesting partition state", e);
		return FutureUtils.completedExceptionally(e);
	}
}
 
Example #18
Source File: TaskTest.java    From flink with Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
	final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
	final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] { dummyChannel });
	testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singleton(dummyGate));
}
 
Example #19
Source File: InputDependencyConstraintCheckerTest.java    From flink with Apache License 2.0
List<TestingSchedulingResultPartition> finish() {
	List<TestingSchedulingResultPartition> partitions = new ArrayList<>(dataSetCnt * partitionCntPerDataSet);
	for (int dataSetIdx = 0; dataSetIdx < dataSetCnt; dataSetIdx++) {
		IntermediateDataSetID dataSetId = new IntermediateDataSetID();
		for (int partitionIdx = 0; partitionIdx < partitionCntPerDataSet; partitionIdx++) {
			partitions.add(new TestingSchedulingResultPartition(dataSetId, partitionType, partitionState));
		}
	}

	return partitions;
}
 
Example #20
Source File: RpcPartitionStateChecker.java    From Flink-CEPplus with Apache License 2.0
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
		JobID jobId,
		IntermediateDataSetID resultId,
		ResultPartitionID partitionId) {

	return jobMasterGateway.requestPartitionState(resultId, partitionId);
}
 
Example #21
Source File: TestingSchedulingResultPartition.java    From flink with Apache License 2.0
TestingSchedulingResultPartition(IntermediateDataSetID dataSetID, ResultPartitionType type, ResultPartitionState state) {
	this.intermediateDataSetID = dataSetID;
	this.partitionType = type;
	this.state = state;
	this.intermediateResultPartitionID = new IntermediateResultPartitionID();
	this.consumers = new ArrayList<>();
}
 
Example #22
Source File: ClusterPartitionReport.java    From flink with Apache License 2.0
public ClusterPartitionReportEntry(IntermediateDataSetID dataSetId, Set<ResultPartitionID> hostedPartitions, int numTotalPartitions) {
	Preconditions.checkNotNull(dataSetId);
	Preconditions.checkNotNull(hostedPartitions);
	Preconditions.checkArgument(!hostedPartitions.isEmpty());
	Preconditions.checkArgument(numTotalPartitions > 0);
	Preconditions.checkState(hostedPartitions.size() <= numTotalPartitions);

	this.dataSetId = dataSetId;
	this.hostedPartitions = hostedPartitions;
	this.numTotalPartitions = numTotalPartitions;
}
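A minimal construction that satisfies all of the preconditions above (one hosted partition out of one total partition; the IDs are freshly generated for illustration):

ClusterPartitionReportEntry entry = new ClusterPartitionReportEntry(
	new IntermediateDataSetID(),
	Collections.singleton(new ResultPartitionID()),
	1);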
 
Example #23
Source File: SchedulerBase.java    From flink with Apache License 2.0
@Override
public ExecutionState requestPartitionState(
	final IntermediateDataSetID intermediateResultId,
	final ResultPartitionID resultPartitionId) throws PartitionProducerDisposedException {

	mainThreadExecutor.assertRunningInMainThread();

	final Execution execution = executionGraph.getRegisteredExecutions().get(resultPartitionId.getProducerId());
	if (execution != null) {
		return execution.getState();
	}
	else {
		final IntermediateResult intermediateResult =
			executionGraph.getAllIntermediateResults().get(intermediateResultId);

		if (intermediateResult != null) {
			// Try to find the producing execution
			Execution producerExecution = intermediateResult
				.getPartitionById(resultPartitionId.getPartitionId())
				.getProducer()
				.getCurrentExecutionAttempt();

			if (producerExecution.getAttemptId().equals(resultPartitionId.getProducerId())) {
				return producerExecution.getState();
			} else {
				throw new PartitionProducerDisposedException(resultPartitionId);
			}
		} else {
			throw new IllegalArgumentException("Intermediate data set with ID "
				+ intermediateResultId + " not found.");
		}
	}
}
 
Example #24
Source File: SingleInputGate.java    From flink with Apache License 2.0
public SingleInputGate(
	String owningTaskName,
	IntermediateDataSetID consumedResultId,
	final ResultPartitionType consumedPartitionType,
	int consumedSubpartitionIndex,
	int numberOfInputChannels,
	PartitionProducerStateProvider partitionProducerStateProvider,
	boolean isCreditBased,
	SupplierWithException<BufferPool, IOException> bufferPoolFactory) {

	this.owningTaskName = checkNotNull(owningTaskName);

	this.consumedResultId = checkNotNull(consumedResultId);
	this.consumedPartitionType = checkNotNull(consumedPartitionType);
	this.bufferPoolFactory = checkNotNull(bufferPoolFactory);

	checkArgument(consumedSubpartitionIndex >= 0);
	this.consumedSubpartitionIndex = consumedSubpartitionIndex;

	checkArgument(numberOfInputChannels > 0);
	this.numberOfInputChannels = numberOfInputChannels;

	this.inputChannels = new HashMap<>(numberOfInputChannels);
	this.channelsWithEndOfPartitionEvents = new BitSet(numberOfInputChannels);
	this.enqueuedInputChannelsWithData = new BitSet(numberOfInputChannels);

	this.partitionProducerStateProvider = checkNotNull(partitionProducerStateProvider);

	this.isCreditBased = isCreditBased;

	this.closeFuture = new CompletableFuture<>();
}
 
Example #25
Source File: InputGateDeploymentDescriptor.java    From flink with Apache License 2.0
public InputGateDeploymentDescriptor(
		IntermediateDataSetID consumedResultId,
		ResultPartitionType consumedPartitionType,
		@Nonnegative int consumedSubpartitionIndex,
		ShuffleDescriptor[] inputChannels) {
	this.consumedResultId = checkNotNull(consumedResultId);
	this.consumedPartitionType = checkNotNull(consumedPartitionType);
	this.consumedSubpartitionIndex = consumedSubpartitionIndex;
	this.inputChannels = checkNotNull(inputChannels);
}
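Examples #18 and #26 show this constructor in use, each passing a freshly generated IntermediateDataSetID for a pipelined gate with a single remote channel.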
 
Example #26
Source File: TaskTest.java    From flink with Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
	final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
	final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] { dummyChannel });
	testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singletonList(dummyGate));
}
 
Example #27
Source File: JobGraphGenerator.java    From flink with Apache License 2.0
private boolean checkAndConfigurePersistentIntermediateResult(PlanNode node) {
	if (!(node instanceof SinkPlanNode)) {
		return false;
	}

	final Object userCodeObject = node.getProgramOperator().getUserCodeWrapper().getUserCodeObject();
	if (!(userCodeObject instanceof BlockingShuffleOutputFormat)) {
		return false;
	}

	final Iterator<Channel> inputIterator = node.getInputs().iterator();
	checkState(inputIterator.hasNext(), "SinkPlanNode must have an input.");

	final PlanNode predecessorNode = inputIterator.next().getSource();
	final JobVertex predecessorVertex = (vertices.containsKey(predecessorNode)) ?
		vertices.get(predecessorNode) :
		chainedTasks.get(predecessorNode).getContainingVertex();

	checkState(predecessorVertex != null, "Bug: Chained task has not been assigned its containing vertex when connecting.");

	predecessorVertex.createAndAddResultDataSet(
			// use specified intermediateDataSetID
			new IntermediateDataSetID(((BlockingShuffleOutputFormat) userCodeObject).getIntermediateDataSetId()),
			ResultPartitionType.BLOCKING_PERSISTENT);

	// remove this node so the OutputFormatVertex will not be shown in the final JobGraph.
	vertices.remove(node);
	return true;
}
 
Example #28
Source File: InputGateConcurrentTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConsumptionWithLocalChannels() throws Exception {
	final int numberOfChannels = 11;
	final int buffersPerChannel = 1000;

	final ResultPartition resultPartition = mock(ResultPartition.class);

	final PipelinedSubpartition[] partitions = new PipelinedSubpartition[numberOfChannels];
	final Source[] sources = new Source[numberOfChannels];

	final ResultPartitionManager resultPartitionManager = createResultPartitionManager(partitions);

	final SingleInputGate gate = new SingleInputGate(
			"Test Task Name",
			new JobID(),
			new IntermediateDataSetID(), ResultPartitionType.PIPELINED,
			0, numberOfChannels,
			mock(TaskActions.class),
			UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
			true);

	for (int i = 0; i < numberOfChannels; i++) {
		LocalInputChannel channel = new LocalInputChannel(gate, i, new ResultPartitionID(),
				resultPartitionManager, mock(TaskEventDispatcher.class), UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());
		gate.setInputChannel(new IntermediateResultPartitionID(), channel);

		partitions[i] = new PipelinedSubpartition(0, resultPartition);
		sources[i] = new PipelinedSubpartitionSource(partitions[i]);
	}

	ProducerThread producer = new ProducerThread(sources, numberOfChannels * buffersPerChannel, 4, 10);
	ConsumerThread consumer = new ConsumerThread(gate, numberOfChannels * buffersPerChannel);
	producer.start();
	consumer.start();

	// the 'sync()' call checks for exceptions and failed assertions
	producer.sync();
	consumer.sync();
}