Java Code Examples for org.apache.flink.runtime.jobgraph.IntermediateDataSetID

The following examples show how to use org.apache.flink.runtime.jobgraph.IntermediateDataSetID. These examples are extracted from open source projects.
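
For orientation: an IntermediateDataSetID identifies an intermediate result (a data set) produced by a job vertex. The snippet below is a minimal sketch of how the IDs used throughout these examples are typically obtained; it assumes only the two constructors that appear in the examples themselves.

// The no-argument constructor generates a fresh random ID
// (IntermediateDataSetID extends AbstractID).
IntermediateDataSetID dataSetId = new IntermediateDataSetID();

// An ID can also be derived from an existing AbstractID, as one example below
// does with BlockingShuffleOutputFormat#getIntermediateDataSetId().
IntermediateDataSetID derivedId = new IntermediateDataSetID(dataSetId);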
Example 1
/**
 * Creates a partial input channel for the given partition and producing task.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition,
		Execution producer) {

	final ResultPartitionID partitionId = new ResultPartitionID(
			partition.getPartitionId(), producer.getAttemptId());

	final IntermediateResult result = partition.getIntermediateResult();

	final IntermediateDataSetID resultId = result.getId();
	final TaskManagerLocation partitionConnectionInfo = producer.getAssignedResourceLocation();
	final int partitionConnectionIndex = result.getConnectionIndex();

	return new PartialInputChannelDeploymentDescriptor(
			resultId, partitionId, partitionConnectionInfo, partitionConnectionIndex);
}
 
Example 2
public static ResultPartitionDeploymentDescriptor from(
		IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) {

	final IntermediateDataSetID resultId = partition.getIntermediateResult().getId();
	final IntermediateResultPartitionID partitionId = partition.getPartitionId();
	final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType();

	// The produced data is partitioned among a number of subpartitions.
	//
	// If no consumers are known at this point, we use a single subpartition, otherwise we have
	// one for each consuming sub task.
	int numberOfSubpartitions = 1;

	if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) {

		if (partition.getConsumers().size() > 1) {
			throw new IllegalStateException("Currently, only a single consumer group per partition is supported.");
		}

		numberOfSubpartitions = partition.getConsumers().get(0).size();
	}

	return new ResultPartitionDeploymentDescriptor(
			resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling);
}
 
Example 3
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
		JobID jobId,
		IntermediateDataSetID intermediateDataSetId,
		ResultPartitionID resultPartitionId) {

	JobManagerMessages.RequestPartitionProducerState msg = new JobManagerMessages.RequestPartitionProducerState(
		jobId,
		intermediateDataSetId, resultPartitionId
	);

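	// ask the JobManager actor and map the untyped Scala future to ExecutionState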
	scala.concurrent.Future<ExecutionState> futureResponse = jobManager
		.ask(msg, timeout)
		.mapTo(ClassTag$.MODULE$.<ExecutionState>apply(ExecutionState.class));

	return FutureUtils.toJava(futureResponse);
}
 
Example 4
Source Project: flink   Source File: TaskExecutorSubmissionTest.java    License: Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		shuffleDescriptor.getResultPartitionID().getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example 5
Source Project: flink   Source File: TaskTest.java    License: Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForPartitions() throws Exception {
	final PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		new IntermediateResultPartitionID(),
		ResultPartitionType.PIPELINED,
		1,
		1);
	final ShuffleDescriptor shuffleDescriptor = NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
	final ResultPartitionDeploymentDescriptor dummyPartition = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		false);
	testExecutionFailsInNetworkRegistration(Collections.singleton(dummyPartition), Collections.emptyList());
}
 
Example 6
Source Project: flink   Source File: TestingSchedulingTopology.java    License: Apache License 2.0
@Override
protected List<TestingSchedulingResultPartition> connect() {
	final List<TestingSchedulingResultPartition> resultPartitions = new ArrayList<>();
	final IntermediateDataSetID intermediateDataSetId = new IntermediateDataSetID();

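	// all partitions created below belong to the same intermediate data set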
	for (TestingSchedulingExecutionVertex producer : producers) {

		final TestingSchedulingResultPartition resultPartition = initTestingSchedulingResultPartitionBuilder()
			.withIntermediateDataSetID(intermediateDataSetId)
			.withResultPartitionState(resultPartitionState)
			.build();
		resultPartition.setProducer(producer);
		producer.addProducedPartition(resultPartition);

		for (TestingSchedulingExecutionVertex consumer : consumers) {
			consumer.addConsumedPartition(resultPartition);
			resultPartition.addConsumer(consumer);
		}
		resultPartitions.add(resultPartition);
	}

	return resultPartitions;
}
 
Example 7
@Before
public void setUp() throws Exception {

	intermediateResultPartitionId = new IntermediateResultPartitionID();

	DefaultSchedulingResultPartition schedulingResultPartition = new DefaultSchedulingResultPartition(
		intermediateResultPartitionId,
		new IntermediateDataSetID(),
		BLOCKING);
	producerVertex = new DefaultSchedulingExecutionVertex(
		new ExecutionVertexID(new JobVertexID(), 0),
		Collections.singletonList(schedulingResultPartition),
		stateSupplier,
		ANY);
	schedulingResultPartition.setProducer(producerVertex);
	consumerVertex = new DefaultSchedulingExecutionVertex(
		new ExecutionVertexID(new JobVertexID(), 0),
		Collections.emptyList(),
		stateSupplier,
		ANY);
	consumerVertex.addConsumedPartition(schedulingResultPartition);
}
 
Example 8
Source Project: flink   Source File: PartitionDescriptor.java    License: Apache License 2.0
@VisibleForTesting
public PartitionDescriptor(
		IntermediateDataSetID resultId,
		int totalNumberOfPartitions,
		IntermediateResultPartitionID partitionId,
		ResultPartitionType partitionType,
		int numberOfSubpartitions,
		int connectionIndex) {
	this.resultId = checkNotNull(resultId);
	checkArgument(totalNumberOfPartitions >= 1);
	this.totalNumberOfPartitions = totalNumberOfPartitions;
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);
	checkArgument(numberOfSubpartitions >= 1);
	this.numberOfSubpartitions = numberOfSubpartitions;
	this.connectionIndex = connectionIndex;
}
 
Example 9
@Test
public void stopTrackingAndReleaseClusterPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final IntermediateDataSetID dataSetId1 = new IntermediateDataSetID();
	final IntermediateDataSetID dataSetId2 = new IntermediateDataSetID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, dataSetId1, 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, dataSetId2, 1));
	partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));

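	// dataSetId1 contains the promoted (cluster) partition, so releasing that data set should release resultPartitionId1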
	partitionTracker.stopTrackingAndReleaseClusterPartitions(Collections.singleton(dataSetId1));
	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example 10
Source Project: Flink-CEPplus   Source File: InputGateFairnessTest.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
public FairnessVerifyingInputGate(
		String owningTaskName,
		JobID jobId,
		IntermediateDataSetID consumedResultId,
		int consumedSubpartitionIndex,
		int numberOfInputChannels,
		TaskActions taskActions,
		TaskIOMetricGroup metrics,
		boolean isCreditBased) {

	super(owningTaskName, jobId, consumedResultId, ResultPartitionType.PIPELINED,
		consumedSubpartitionIndex,
			numberOfInputChannels, taskActions, metrics, isCreditBased);

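	// grab the gate's internal channel queue via reflection so fairness can be verified against it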
	try {
		Field f = SingleInputGate.class.getDeclaredField("inputChannelsWithData");
		f.setAccessible(true);
		channelsWithData = (ArrayDeque<InputChannel>) f.get(this);
	}
	catch (Exception e) {
		throw new RuntimeException(e);
	}

	this.uniquenessChecker = new HashSet<>();
}
 
Example 11
Source Project: flink   Source File: Task.java    License: Apache License 2.0
@Override
public void requestPartitionProducerState(
		final IntermediateDataSetID intermediateDataSetId,
		final ResultPartitionID resultPartitionId,
		Consumer<? super ResponseHandle> responseConsumer) {

	final CompletableFuture<ExecutionState> futurePartitionState =
		partitionProducerStateChecker.requestPartitionProducerState(
			jobId,
			intermediateDataSetId,
			resultPartitionId);

	FutureUtils.assertNoException(
		futurePartitionState
			.handle(PartitionProducerStateResponseHandle::new)
			.thenAcceptAsync(responseConsumer, executor));
}
 
Example 12
@Test
public void stopTrackingAndReleaseAllClusterPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
	partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));

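	// only resultPartitionId1 was promoted to a cluster partition, so only it should be released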
	partitionTracker.stopTrackingAndReleaseAllClusterPartitions();
	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example 13
Source Project: flink   Source File: IntermediateResult.java    License: Apache License 2.0
public IntermediateResult(
		IntermediateDataSetID id,
		ExecutionJobVertex producer,
		int numParallelProducers,
		ResultPartitionType resultType) {

	this.id = checkNotNull(id);
	this.producer = checkNotNull(producer);

	checkArgument(numParallelProducers >= 1);
	this.numParallelProducers = numParallelProducers;

	this.partitions = new IntermediateResultPartition[numParallelProducers];

	this.numberOfRunningProducers = new AtomicInteger(numParallelProducers);

	// we do not set the intermediate result partitions here, because we let them be initialized by
	// the execution vertex that produces them

	// assign a random connection index
	this.connectionIndex = (int) (Math.random() * Integer.MAX_VALUE);

	// The runtime type for this produced result
	this.resultType = checkNotNull(resultType);
}
 
Example 14
@Test
public void testStopTrackingAndReleaseJobPartitionsFor() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final JobID jobId1 = new JobID();
	final JobID jobId2 = new JobID();
	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(jobId1, new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(jobId2, new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
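	// releasing jobId1's partitions must not affect those of jobId2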
	partitionTracker.stopTrackingAndReleaseJobPartitionsFor(jobId1);

	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example 15
@Test
public void testStopTrackingAndReleaseJobPartitions() throws Exception {
	final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
	final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture = new CompletableFuture<>();
	testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;

	final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
	final ResultPartitionID resultPartitionId2 = new ResultPartitionID();

	final TaskExecutorPartitionTracker partitionTracker = new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId1, new IntermediateDataSetID(), 1));
	partitionTracker.startTrackingPartition(new JobID(), new TaskExecutorPartitionInfo(resultPartitionId2, new IntermediateDataSetID(), 1));
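	// release only the explicitly listed partition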
	partitionTracker.stopTrackingAndReleaseJobPartitions(Collections.singleton(resultPartitionId1));

	assertThat(shuffleReleaseFuture.get(), hasItem(resultPartitionId1));
}
 
Example 16
Source Project: flink   Source File: SingleInputGate.java    License: Apache License 2.0
public SingleInputGate(
	String owningTaskName,
	IntermediateDataSetID consumedResultId,
	final ResultPartitionType consumedPartitionType,
	int consumedSubpartitionIndex,
	int numberOfInputChannels,
	PartitionProducerStateProvider partitionProducerStateProvider,
	boolean isCreditBased,
	SupplierWithException<BufferPool, IOException> bufferPoolFactory) {

	this.owningTaskName = checkNotNull(owningTaskName);

	this.consumedResultId = checkNotNull(consumedResultId);
	this.consumedPartitionType = checkNotNull(consumedPartitionType);
	this.bufferPoolFactory = checkNotNull(bufferPoolFactory);

	checkArgument(consumedSubpartitionIndex >= 0);
	this.consumedSubpartitionIndex = consumedSubpartitionIndex;

	checkArgument(numberOfInputChannels > 0);
	this.numberOfInputChannels = numberOfInputChannels;

	this.inputChannels = new HashMap<>(numberOfInputChannels);
	this.channelsWithEndOfPartitionEvents = new BitSet(numberOfInputChannels);
	this.enqueuedInputChannelsWithData = new BitSet(numberOfInputChannels);

	this.partitionProducerStateProvider = checkNotNull(partitionProducerStateProvider);

	this.isCreditBased = isCreditBased;

	this.closeFuture = new CompletableFuture<>();
}
 
Example 17
public PartialInputChannelDeploymentDescriptor(
		IntermediateDataSetID resultId,
		ResultPartitionID partitionID,
		TaskManagerLocation partitionTaskManagerLocation,
		int partitionConnectionIndex) {

	this.resultId = checkNotNull(resultId);
	this.partitionID = checkNotNull(partitionID);
	this.partitionTaskManagerLocation = checkNotNull(partitionTaskManagerLocation);
	this.partitionConnectionIndex = partitionConnectionIndex;
}
 
Example 18
Source Project: flink   Source File: InputDependencyConstraintChecker.java    License: Apache License 2.0
private SchedulingIntermediateDataSet getSchedulingIntermediateDataSetInternal(
		final IntermediateDataSetID intermediateDataSetId,
		boolean createIfAbsent) {

	return intermediateDataSets.computeIfAbsent(
		intermediateDataSetId,
		(key) -> {
			if (createIfAbsent) {
				return new SchedulingIntermediateDataSet();
			} else {
				throw new IllegalArgumentException("can not find data set for " + intermediateDataSetId);
			}
		});
}
 
Example 19
Source Project: flink   Source File: DefaultLogicalTopology.java    License: Apache License 2.0
private void buildVerticesAndResults(final JobGraph jobGraph) {
	final Function<JobVertexID, DefaultLogicalVertex> vertexRetriever = this::getVertex;
	final Function<IntermediateDataSetID, DefaultLogicalResult> resultRetriever = this::getResult;

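	// iterate in topological order so produced results are registered before consumers look them up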
	for (JobVertex jobVertex : jobGraph.getVerticesSortedTopologicallyFromSources()) {
		final DefaultLogicalVertex logicalVertex = new DefaultLogicalVertex(jobVertex, resultRetriever);
		this.verticesSorted.add(logicalVertex);
		this.idToVertexMap.put(logicalVertex.getId(), logicalVertex);

		for (IntermediateDataSet intermediateDataSet : jobVertex.getProducedDataSets()) {
			final DefaultLogicalResult logicalResult = new DefaultLogicalResult(intermediateDataSet, vertexRetriever);
			idToResultMap.put(logicalResult.getId(), logicalResult);
		}
	}
}
 
Example 20
Source Project: Flink-CEPplus   Source File: RpcPartitionStateChecker.java    License: Apache License 2.0
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
		JobID jobId,
		IntermediateDataSetID resultId,
		ResultPartitionID partitionId) {

	return jobMasterGateway.requestPartitionState(resultId, partitionId);
}
 
Example 21
Source Project: Flink-CEPplus   Source File: ExecutionJobVertex.java    License: Apache License 2.0
public void connectToPredecessors(Map<IntermediateDataSetID, IntermediateResult> intermediateDataSets) throws JobException {

		List<JobEdge> inputs = jobVertex.getInputs();

		if (LOG.isDebugEnabled()) {
			LOG.debug(String.format("Connecting ExecutionJobVertex %s (%s) to %d predecessors.", jobVertex.getID(), jobVertex.getName(), inputs.size()));
		}

		for (int num = 0; num < inputs.size(); num++) {
			JobEdge edge = inputs.get(num);

			if (LOG.isDebugEnabled()) {
				if (edge.getSource() == null) {
					LOG.debug(String.format("Connecting input %d of vertex %s (%s) to intermediate result referenced via ID %s.",
							num, jobVertex.getID(), jobVertex.getName(), edge.getSourceId()));
				} else {
					LOG.debug(String.format("Connecting input %d of vertex %s (%s) to intermediate result referenced via predecessor %s (%s).",
							num, jobVertex.getID(), jobVertex.getName(), edge.getSource().getProducer().getID(), edge.getSource().getProducer().getName()));
				}
			}

			// fetch the intermediate result via its ID. If it does not exist, it has either not been created, or the order
			// in which this method is called for the job vertices is not a topological order
			IntermediateResult ires = intermediateDataSets.get(edge.getSourceId());
			if (ires == null) {
				throw new JobException("Cannot connect this job graph to the previous graph. No previous intermediate result found for ID "
						+ edge.getSourceId());
			}

			this.inputs.add(ires);

			int consumerIndex = ires.registerConsumer();

			for (int i = 0; i < parallelism; i++) {
				ExecutionVertex ev = taskVertices[i];
				ev.connectSource(num, ires, edge, consumerIndex);
			}
		}
	}
 
Example 22
Source Project: flink   Source File: ResourceManagerPartitionTrackerImpl.java    License: Apache License 2.0
/**
 * Updates the data sets for which the given task executor is hosting partitions and returns data sets that were
 * corrupted due to a loss of partitions.
 *
 * @param taskExecutorId ID of the hosting TaskExecutor
 * @param reportEntries  IDs of data sets for which partitions are hosted
 * @return corrupted data sets
 */
private Set<IntermediateDataSetID> setHostedDataSetsAndCheckCorruption(ResourceID taskExecutorId, Collection<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries) {
	final Set<IntermediateDataSetID> currentlyHostedDatasets = reportEntries
		.stream()
		.map(ClusterPartitionReport.ClusterPartitionReportEntry::getDataSetId)
		.collect(Collectors.toSet());

	final Set<IntermediateDataSetID> previouslyHostedDataSets = taskExecutorToDataSets.put(
		taskExecutorId,
		currentlyHostedDatasets);

	// previously tracked data sets may be corrupted since we may be tracking fewer partitions than before
	final Set<IntermediateDataSetID> potentiallyCorruptedDataSets = Optional
		.ofNullable(previouslyHostedDataSets)
		.orElse(new HashSet<>(0));

	// update the data set -> task executor mapping and find data sets that lost a partition
	reportEntries.forEach(hostedPartition -> {
		final Map<ResourceID, Set<ResultPartitionID>> taskExecutorHosts = dataSetToTaskExecutors.computeIfAbsent(hostedPartition.getDataSetId(), ignored -> new HashMap<>());
		final Set<ResultPartitionID> previouslyHostedPartitions = taskExecutorHosts.put(taskExecutorId, hostedPartition.getHostedPartitions());

		final boolean noPartitionLost = previouslyHostedPartitions == null || hostedPartition.getHostedPartitions().containsAll(previouslyHostedPartitions);
		if (noPartitionLost) {
			potentiallyCorruptedDataSets.remove(hostedPartition.getDataSetId());
		}
	});

	// now only contains data sets for which a partition is no longer tracked
	return potentiallyCorruptedDataSets;
}
 
Example 23
Source Project: flink   Source File: SchedulerBase.java    License: Apache License 2.0
@Override
public ExecutionState requestPartitionState(
	final IntermediateDataSetID intermediateResultId,
	final ResultPartitionID resultPartitionId) throws PartitionProducerDisposedException {

	mainThreadExecutor.assertRunningInMainThread();

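	// fast path: the producing execution is still registered, so its state can be returned directly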
	final Execution execution = executionGraph.getRegisteredExecutions().get(resultPartitionId.getProducerId());
	if (execution != null) {
		return execution.getState();
	}
	else {
		final IntermediateResult intermediateResult =
			executionGraph.getAllIntermediateResults().get(intermediateResultId);

		if (intermediateResult != null) {
			// Try to find the producing execution
			Execution producerExecution = intermediateResult
				.getPartitionById(resultPartitionId.getPartitionId())
				.getProducer()
				.getCurrentExecutionAttempt();

			if (producerExecution.getAttemptId().equals(resultPartitionId.getProducerId())) {
				return producerExecution.getState();
			} else {
				throw new PartitionProducerDisposedException(resultPartitionId);
			}
		} else {
			throw new IllegalArgumentException("Intermediate data set with ID "
				+ intermediateResultId + " not found.");
		}
	}
}
 
Example 24
Source Project: flink   Source File: JobGraphGenerator.java    License: Apache License 2.0
private boolean checkAndConfigurePersistentIntermediateResult(PlanNode node) {
	if (!(node instanceof SinkPlanNode)) {
		return false;
	}

	final Object userCodeObject = node.getProgramOperator().getUserCodeWrapper().getUserCodeObject();
	if (!(userCodeObject instanceof BlockingShuffleOutputFormat)) {
		return false;
	}

	final Iterator<Channel> inputIterator = node.getInputs().iterator();
	checkState(inputIterator.hasNext(), "SinkPlanNode must have a input.");

	final PlanNode predecessorNode = inputIterator.next().getSource();
	final JobVertex predecessorVertex = (vertices.containsKey(predecessorNode)) ?
		vertices.get(predecessorNode) :
		chainedTasks.get(predecessorNode).getContainingVertex();

	checkState(predecessorVertex != null, "Bug: Chained task has not been assigned its containing vertex when connecting.");

	predecessorVertex.createAndAddResultDataSet(
			// use specified intermediateDataSetID
			new IntermediateDataSetID(((BlockingShuffleOutputFormat) userCodeObject).getIntermediateDataSetId()),
			ResultPartitionType.BLOCKING_PERSISTENT);

	// remove this node so the OutputFormatVertex will not be shown in the final JobGraph.
	vertices.remove(node);
	return true;
}
 
Example 25
Source Project: Flink-CEPplus   Source File: InputGateConcurrentTest.java    License: Apache License 2.0
@Test
public void testConsumptionWithLocalChannels() throws Exception {
	final int numberOfChannels = 11;
	final int buffersPerChannel = 1000;

	final ResultPartition resultPartition = mock(ResultPartition.class);

	final PipelinedSubpartition[] partitions = new PipelinedSubpartition[numberOfChannels];
	final Source[] sources = new Source[numberOfChannels];

	final ResultPartitionManager resultPartitionManager = createResultPartitionManager(partitions);

	final SingleInputGate gate = new SingleInputGate(
			"Test Task Name",
			new JobID(),
			new IntermediateDataSetID(), ResultPartitionType.PIPELINED,
			0, numberOfChannels,
			mock(TaskActions.class),
			UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
			true);

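	// wire up one local input channel per pipelined subpartition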
	for (int i = 0; i < numberOfChannels; i++) {
		LocalInputChannel channel = new LocalInputChannel(gate, i, new ResultPartitionID(),
				resultPartitionManager, mock(TaskEventDispatcher.class), UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());
		gate.setInputChannel(new IntermediateResultPartitionID(), channel);

		partitions[i] = new PipelinedSubpartition(0, resultPartition);
		sources[i] = new PipelinedSubpartitionSource(partitions[i]);
	}

	ProducerThread producer = new ProducerThread(sources, numberOfChannels * buffersPerChannel, 4, 10);
	ConsumerThread consumer = new ConsumerThread(gate, numberOfChannels * buffersPerChannel);
	producer.start();
	consumer.start();

	// the 'sync()' call checks for exceptions and failed assertions
	producer.sync();
	consumer.sync();
}
 
Example 26
Source Project: flink   Source File: TaskTest.java    License: Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
	final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
	final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] { dummyChannel });
	testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singletonList(dummyGate));
}
 
Example 27
Source Project: flink   Source File: InputGateDeploymentDescriptor.java    License: Apache License 2.0
public InputGateDeploymentDescriptor(
		IntermediateDataSetID consumedResultId,
		ResultPartitionType consumedPartitionType,
		@Nonnegative int consumedSubpartitionIndex,
		ShuffleDescriptor[] inputChannels) {
	this.consumedResultId = checkNotNull(consumedResultId);
	this.consumedPartitionType = checkNotNull(consumedPartitionType);
	this.consumedSubpartitionIndex = consumedSubpartitionIndex;
	this.inputChannels = checkNotNull(inputChannels);
}
 
Example 28
List<TestingSchedulingResultPartition> finish() {
	List<TestingSchedulingResultPartition> partitions = new ArrayList<>(dataSetCnt * partitionCntPerDataSet);
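	// create partitionCntPerDataSet partitions per data set, all sharing that data set's ID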
	for (int dataSetIdx = 0; dataSetIdx < dataSetCnt; dataSetIdx++) {
		IntermediateDataSetID dataSetId = new IntermediateDataSetID();
		for (int partitionIdx = 0; partitionIdx < partitionCntPerDataSet; partitionIdx++) {
			partitions.add(new TestingSchedulingResultPartition(dataSetId, partitionType, partitionState));
		}
	}

	return partitions;
}
 
Example 29
Source Project: flink   Source File: ClusterPartitionReport.java    License: Apache License 2.0
public ClusterPartitionReportEntry(IntermediateDataSetID dataSetId, Set<ResultPartitionID> hostedPartitions, int numTotalPartitions) {
	Preconditions.checkNotNull(dataSetId);
	Preconditions.checkNotNull(hostedPartitions);
	Preconditions.checkArgument(!hostedPartitions.isEmpty());
	Preconditions.checkArgument(numTotalPartitions > 0);
	Preconditions.checkState(hostedPartitions.size() <= numTotalPartitions);

	this.dataSetId = dataSetId;
	this.hostedPartitions = hostedPartitions;
	this.numTotalPartitions = numTotalPartitions;
}