org.apache.flink.runtime.io.network.partition.ResultPartition Java Examples
The following examples show how to use
org.apache.flink.runtime.io.network.partition.ResultPartition.
The source file and originating project are noted above each example.
Example #1
Source File: TaskIOMetricGroup.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
public Float getValue() {
    int usedBuffers = 0;
    int bufferPoolSize = 0;

    for (ResultPartition resultPartition : task.getProducedPartitions()) {
        usedBuffers += resultPartition.getBufferPool().bestEffortGetNumOfUsedBuffers();
        bufferPoolSize += resultPartition.getBufferPool().getNumBuffers();
    }

    if (bufferPoolSize != 0) {
        return ((float) usedBuffers) / bufferPoolSize;
    } else {
        return 0.0f;
    }
}
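For context, a gauge like this one only becomes visible once it is registered on a metric group. The following is a minimal sketch using the public org.apache.flink.metrics API; the metric name "outPoolUsage" and the ioMetricGroup parameter are illustrative placeholders, not taken from the example above.

import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;

public class OutputPoolUsageRegistration {

    // Registers a Float gauge under an illustrative name; the metric group is
    // assumed to be supplied by the surrounding task infrastructure.
    public static void register(MetricGroup ioMetricGroup, Gauge<Float> outputPoolUsage) {
        ioMetricGroup.gauge("outPoolUsage", outputPoolUsage);
    }
}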
Example #2
Source File: NetworkEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public void registerTask(Task task) throws IOException {
    final ResultPartition[] producedPartitions = task.getProducedPartitions();

    synchronized (lock) {
        if (isShutdown) {
            throw new IllegalStateException("NetworkEnvironment is shut down");
        }

        for (final ResultPartition partition : producedPartitions) {
            setupPartition(partition);
        }

        // Setup the buffer pool for each buffer reader
        final SingleInputGate[] inputGates = task.getAllInputGates();
        for (SingleInputGate gate : inputGates) {
            setupInputGate(gate);
        }
    }
}
Example #3
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0 | 6 votes |
@Override
public List<ResultPartition> createResultPartitionWriters(
        ShuffleIOOwnerContext ownerContext,
        List<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors) {
    synchronized (lock) {
        Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

        ResultPartition[] resultPartitions = new ResultPartition[resultPartitionDeploymentDescriptors.size()];
        for (int partitionIndex = 0; partitionIndex < resultPartitions.length; partitionIndex++) {
            resultPartitions[partitionIndex] = resultPartitionFactory.create(
                ownerContext.getOwnerName(),
                partitionIndex,
                resultPartitionDeploymentDescriptors.get(partitionIndex));
        }

        registerOutputMetrics(config.isNetworkDetailedMetrics(), ownerContext.getOutputGroup(), resultPartitions);
        return Arrays.asList(resultPartitions);
    }
}
Example #4
Source File: NetworkEnvironmentTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Helper to create simple {@link ResultPartition} instance for use by a {@link Task} inside
 * {@link NetworkEnvironment#registerTask(Task)}.
 *
 * @param partitionType
 *         the produced partition type
 * @param channels
 *         the number of output channels
 *
 * @return instance with minimal data set and some mocks so that it is useful for {@link
 * NetworkEnvironment#registerTask(Task)}
 */
private static ResultPartition createResultPartition(
        final ResultPartitionType partitionType, final int channels) {
    return new ResultPartition(
        "TestTask-" + partitionType + ":" + channels,
        mock(TaskActions.class),
        new JobID(),
        new ResultPartitionID(),
        partitionType,
        channels,
        channels,
        mock(ResultPartitionManager.class),
        new NoOpResultPartitionConsumableNotifier(),
        mock(IOManager.class),
        false);
}
Example #5
Source File: NetworkEnvironmentTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private static void createRemoteInputChannel(
        SingleInputGate inputGate,
        int channelIndex,
        ResultPartition resultPartition,
        ConnectionManager connManager) {
    RemoteInputChannel channel = new RemoteInputChannel(
        inputGate,
        channelIndex,
        resultPartition.getPartitionId(),
        mock(ConnectionID.class),
        connManager,
        0,
        0,
        UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());

    inputGate.setInputChannel(resultPartition.getPartitionId().getPartitionId(), channel);
}
Example #6
Source File: StreamNetworkBenchmarkEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
protected ResultPartitionWriter createResultPartition(
        JobID jobId,
        ResultPartitionID partitionId,
        NetworkEnvironment environment,
        int channels) throws Exception {

    ResultPartition resultPartition = new ResultPartition(
        "sender task",
        new NoOpTaskActions(),
        jobId,
        partitionId,
        ResultPartitionType.PIPELINED_BOUNDED,
        channels,
        1,
        environment.getResultPartitionManager(),
        new NoOpResultPartitionConsumableNotifier(),
        ioManager,
        false);

    environment.setupPartition(resultPartition);

    return resultPartition;
}
Example #7
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0 | 6 votes |
@Override
public Collection<ResultPartition> createResultPartitionWriters(
        ShuffleIOOwnerContext ownerContext,
        Collection<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors) {
    synchronized (lock) {
        Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

        ResultPartition[] resultPartitions = new ResultPartition[resultPartitionDeploymentDescriptors.size()];
        int counter = 0;
        for (ResultPartitionDeploymentDescriptor rpdd : resultPartitionDeploymentDescriptors) {
            resultPartitions[counter++] = resultPartitionFactory.create(ownerContext.getOwnerName(), rpdd);
        }

        registerOutputMetrics(config.isNetworkDetailedMetrics(), ownerContext.getOutputGroup(), resultPartitions);
        return Arrays.asList(resultPartitions);
    }
}
Example #8
Source File: OutputBufferPoolUsageGauge.java From flink with Apache License 2.0 | 6 votes |
@Override
public Float getValue() {
    int usedBuffers = 0;
    int bufferPoolSize = 0;

    for (ResultPartition resultPartition : resultPartitions) {
        BufferPool bufferPool = resultPartition.getBufferPool();

        if (bufferPool != null) {
            usedBuffers += bufferPool.bestEffortGetNumOfUsedBuffers();
            bufferPoolSize += bufferPool.getNumBuffers();
        }
    }

    if (bufferPoolSize != 0) {
        return ((float) usedBuffers) / bufferPoolSize;
    } else {
        return 0.0f;
    }
}
Example #9
Source File: NettyShuffleMetricFactory.java From flink with Apache License 2.0 | 6 votes |
/**
 * Registers legacy network metric groups before shuffle service refactoring.
 *
 * <p>Registers legacy metric groups if shuffle service implementation is original default one.
 *
 * @deprecated should be removed in future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(
        boolean isDetailedMetrics,
        MetricGroup metricGroup,
        ResultPartitionWriter[] producedPartitions,
        InputGate[] inputGates) {
    checkNotNull(metricGroup);
    checkNotNull(producedPartitions);
    checkNotNull(inputGates);

    // add metrics for buffers
    final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);

    // similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup (metricGroup)
    final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
    final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
    final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);

    ResultPartition[] resultPartitions = Arrays.copyOf(producedPartitions, producedPartitions.length, ResultPartition[].class);
    registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);

    SingleInputGate[] singleInputGates = Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
    registerInputMetrics(isDetailedMetrics, inputGroup, buffersGroup, singleInputGates);
}
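The method above narrows a ResultPartitionWriter[] to a ResultPartition[] with the three-argument Arrays.copyOf overload. The stand-alone sketch below demonstrates that overload with plain JDK types (Number/Integer are used purely for illustration): it copies the elements into a new array of the requested subtype and throws ArrayStoreException if an element does not fit.

import java.util.Arrays;

public class CopyOfDowncastDemo {

    public static void main(String[] args) {
        // Statically typed as Number[] but actually holding Integers,
        // mirroring a ResultPartitionWriter[] that holds ResultPartition instances.
        Number[] numbers = new Number[] {1, 2, 3};

        // Creates a new Integer[] of the same length and copies the elements over.
        Integer[] integers = Arrays.copyOf(numbers, numbers.length, Integer[].class);

        System.out.println(Arrays.toString(integers)); // prints [1, 2, 3]
    }
}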
Example #10
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0 | 5 votes |
private static ResultPartition createFinishedPartitionWithFilledData(ResultPartitionManager partitionManager) throws Exception {
    final ResultPartition partition = new ResultPartitionBuilder()
        .setResultPartitionType(ResultPartitionType.BLOCKING)
        .setFileChannelManager(fileChannelManager)
        .setResultPartitionManager(partitionManager)
        .isReleasedOnConsumption(true)
        .build();

    partitionManager.registerResultPartition(partition);
    PartitionTestUtils.writeBuffers(partition, 1, BUFFER_SIZE);

    return partition;
}
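For comparison, the same test-scope builder can also produce a partition with every option left at its default. This is only a sketch, under the assumption that ResultPartitionBuilder provides usable defaults for all fields, as the snippet above suggests.

import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionBuilder;

public class DefaultPartitionSketch {

    // Builds a ResultPartition without overriding any builder option;
    // assumes the builder's defaults are sufficient for simple in-memory tests.
    static ResultPartition createDefaultPartition() {
        return new ResultPartitionBuilder().build();
    }
}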
Example #11
Source File: InputBuffersMetricsTest.java From flink with Apache License 2.0 | 5 votes |
private RemoteInputChannel buildRemoteChannel(
        int channelIndex,
        SingleInputGate inputGate,
        NettyShuffleEnvironment network,
        ResultPartition partition) {
    return new InputChannelBuilder()
        .setPartitionId(partition.getPartitionId())
        .setChannelIndex(channelIndex)
        .setupFromNettyShuffleEnvironment(network)
        .setConnectionManager(new TestingConnectionManager())
        .buildRemoteAndSetToGate(inputGate);
}
Example #12
Source File: InputBuffersMetricsTest.java From flink with Apache License 2.0 | 5 votes |
private void buildLocalChannel(
        int channelIndex,
        SingleInputGate inputGate,
        NettyShuffleEnvironment network,
        ResultPartition partition) {
    new InputChannelBuilder()
        .setPartitionId(partition.getPartitionId())
        .setChannelIndex(channelIndex)
        .setupFromNettyShuffleEnvironment(network)
        .setConnectionManager(new TestingConnectionManager())
        .buildLocalAndSetToGate(inputGate);
}
Example #13
Source File: TestPartitionProducer.java From flink with Apache License 2.0 | 5 votes |
public TestPartitionProducer(
        ResultPartition partition,
        boolean isSlowProducer,
        TestProducerSource source) {
    this.partition = checkNotNull(partition);
    this.isSlowProducer = isSlowProducer;
    this.random = isSlowProducer ? new Random() : null;
    this.source = checkNotNull(source);
}
Example #14
Source File: ResultPartitionMetrics.java From flink with Apache License 2.0 | 5 votes |
public static void registerQueueLengthMetrics(MetricGroup parent, ResultPartition[] partitions) {
    for (int i = 0; i < partitions.length; i++) {
        ResultPartitionMetrics metrics = new ResultPartitionMetrics(partitions[i]);

        MetricGroup group = parent.addGroup(i);
        group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
        group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
        group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
        group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
    }
}
Example #15
Source File: OutputBuffersGauge.java From flink with Apache License 2.0 | 5 votes |
@Override
public Integer getValue() {
    int totalBuffers = 0;

    for (ResultPartition producedPartition : resultPartitions) {
        totalBuffers += producedPartition.getNumberOfQueuedBuffers();
    }

    return totalBuffers;
}
Example #16
Source File: NettyShuffleMetricFactory.java From flink with Apache License 2.0 | 5 votes |
public static void registerOutputMetrics(
        boolean isDetailedMetrics,
        MetricGroup outputGroup,
        ResultPartition[] resultPartitions) {
    registerOutputMetrics(
        isDetailedMetrics,
        outputGroup,
        outputGroup.addGroup(METRIC_GROUP_BUFFERS),
        resultPartitions);
}
Example #17
Source File: NettyShuffleMetricFactory.java From flink with Apache License 2.0 | 5 votes |
private static void registerOutputMetrics(
        boolean isDetailedMetrics,
        MetricGroup outputGroup,
        MetricGroup buffersGroup,
        ResultPartition[] resultPartitions) {
    if (isDetailedMetrics) {
        ResultPartitionMetrics.registerQueueLengthMetrics(outputGroup, resultPartitions);
    }
    buffersGroup.gauge(METRIC_OUTPUT_QUEUE_LENGTH, new OutputBuffersGauge(resultPartitions));
    buffersGroup.gauge(METRIC_OUTPUT_POOL_USAGE, new OutputBufferPoolUsageGauge(resultPartitions));
}
Example #18
Source File: NettyShuffleEnvironmentTest.java From flink with Apache License 2.0 | 5 votes |
private static RemoteInputChannel createRemoteInputChannel(
        SingleInputGate inputGate,
        int channelIndex,
        ResultPartition resultPartition,
        ConnectionManager connManager) {
    return InputChannelBuilder.newBuilder()
        .setChannelIndex(channelIndex)
        .setPartitionId(resultPartition.getPartitionId())
        .setConnectionManager(connManager)
        .buildRemoteChannel(inputGate);
}
Example #19
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0 | 5 votes |
private void testCancelPartitionRequest(boolean isAvailableView) throws Exception {
    // setup
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartition partition = createFinishedPartitionWithFilledData(partitionManager);

    final InputChannelID receiverId = new InputChannelID();
    final PartitionRequestQueue queue = new PartitionRequestQueue();
    final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
    final EmbeddedChannel channel = new EmbeddedChannel(queue);

    reader.requestSubpartitionView(partitionManager, partition.getPartitionId(), 0);
    // add this reader into allReaders queue
    queue.notifyReaderCreated(reader);

    // block the channel so that we see an intermediate state in the test
    blockChannel(channel);

    // add credit to make this reader available for adding into availableReaders queue
    if (isAvailableView) {
        queue.addCreditOrResumeConsumption(receiverId, viewReader -> viewReader.addCredit(1));
        assertTrue(queue.getAvailableReaders().contains(reader));
    }

    // cancel this subpartition view
    queue.cancel(receiverId);
    channel.runPendingTasks();

    assertFalse(queue.getAvailableReaders().contains(reader));
    // the partition and its reader view should all be released
    assertTrue(reader.isReleased());
    assertTrue(partition.isReleased());
    for (ResultSubpartition subpartition : partition.getAllPartitions()) {
        assertTrue(subpartition.isReleased());
    }

    // cleanup
    channel.close();
}
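The test above drives the Netty pipeline through an EmbeddedChannel, which runs handlers synchronously without real network I/O. As a reminder of how that utility behaves, here is a tiny self-contained sketch that uses only the Netty API and is unrelated to Flink itself.

import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;

public class EmbeddedChannelDemo {

    public static void main(String[] args) {
        // The default ChannelInboundHandlerAdapter simply forwards inbound messages.
        EmbeddedChannel channel = new EmbeddedChannel(new ChannelInboundHandlerAdapter());

        channel.writeInbound("hello");           // pushes a message through the pipeline
        channel.runPendingTasks();               // executes tasks scheduled on the embedded event loop
        Object received = channel.readInbound(); // "hello" reaches the tail unchanged
        System.out.println(received);

        channel.close();
    }
}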
Example #20
Source File: InputBuffersMetricsTest.java From flink with Apache License 2.0 | 5 votes |
private RemoteInputChannel buildRemoteChannel(
        int channelIndex,
        SingleInputGate inputGate,
        NettyShuffleEnvironment network,
        ResultPartition partition) {
    return new InputChannelBuilder()
        .setPartitionId(partition.getPartitionId())
        .setChannelIndex(channelIndex)
        .setupFromNettyShuffleEnvironment(network)
        .setConnectionManager(new TestingConnectionManager())
        .buildRemoteChannel(inputGate);
}
Example #21
Source File: InputBuffersMetricsTest.java From flink with Apache License 2.0 | 5 votes |
private LocalInputChannel buildLocalChannel(
        int channelIndex,
        SingleInputGate inputGate,
        NettyShuffleEnvironment network,
        ResultPartition partition) {
    return new InputChannelBuilder()
        .setPartitionId(partition.getPartitionId())
        .setChannelIndex(channelIndex)
        .setupFromNettyShuffleEnvironment(network)
        .setConnectionManager(new TestingConnectionManager())
        .buildLocalChannel(inputGate);
}
Example #22
Source File: LocalInputChannelTest.java From flink with Apache License 2.0 | 5 votes |
private static ResultSubpartitionView createResultSubpartitionView(boolean addBuffer) throws IOException {
    int bufferSize = 4096;
    ResultPartition parent = PartitionTestUtils.createPartition(
        ResultPartitionType.PIPELINED,
        NoOpFileChannelManager.INSTANCE,
        true,
        bufferSize);
    ResultSubpartition subpartition = parent.getAllPartitions()[0];
    if (addBuffer) {
        subpartition.add(BufferBuilderTestUtils.createFilledFinishedBufferConsumer(bufferSize));
    }
    return subpartition.createReadView(() -> {});
}
Example #23
Source File: Task.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public TaskCanceler(
        Logger logger,
        AbstractInvokable invokable,
        Thread executer,
        String taskName,
        ResultPartition[] producedPartitions,
        SingleInputGate[] inputGates) {
    this.logger = logger;
    this.invokable = invokable;
    this.executer = executer;
    this.taskName = taskName;
    this.producedPartitions = producedPartitions;
    this.inputGates = inputGates;
}
Example #24
Source File: TaskIOMetricGroup.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public Integer getValue() {
    int totalBuffers = 0;

    for (ResultPartition producedPartition : task.getProducedPartitions()) {
        totalBuffers += producedPartition.getNumberOfQueuedBuffers();
    }

    return totalBuffers;
}
Example #25
Source File: NetworkEnvironment.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@VisibleForTesting
public void setupPartition(ResultPartition partition) throws IOException {
    BufferPool bufferPool = null;

    try {
        int maxNumberOfMemorySegments = partition.getPartitionType().isBounded() ?
            partition.getNumberOfSubpartitions() * networkBuffersPerChannel +
                extraNetworkBuffersPerGate : Integer.MAX_VALUE;
        // If the partition type is back pressure-free, we register with the buffer pool for
        // callbacks to release memory.
        bufferPool = networkBufferPool.createBufferPool(
            partition.getNumberOfSubpartitions(),
            maxNumberOfMemorySegments,
            partition.getPartitionType().hasBackPressure() ? Optional.empty() : Optional.of(partition));

        partition.registerBufferPool(bufferPool);

        resultPartitionManager.registerResultPartition(partition);
    } catch (Throwable t) {
        if (bufferPool != null) {
            bufferPool.lazyDestroy();
        }

        if (t instanceof IOException) {
            throw (IOException) t;
        } else {
            throw new IOException(t.getMessage(), t);
        }
    }

    taskEventDispatcher.registerPartition(partition.getPartitionId());
}
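To make the pool-sizing rule above concrete, the small stand-alone sketch below plugs in made-up numbers (10 subpartitions, 2 buffers per channel, 8 extra buffers per gate); these are illustrative values, not asserted Flink defaults.

public class PartitionBufferSizing {

    public static void main(String[] args) {
        int numberOfSubpartitions = 10;      // output channels of the partition
        int networkBuffersPerChannel = 2;    // illustrative value
        int extraNetworkBuffersPerGate = 8;  // illustrative value

        // Bounded partition types cap the local buffer pool, as in setupPartition() above.
        int boundedMax = numberOfSubpartitions * networkBuffersPerChannel + extraNetworkBuffersPerGate;
        System.out.println("bounded max segments: " + boundedMax); // 28

        // Unbounded partition types leave the pool effectively uncapped.
        System.out.println("unbounded max segments: " + Integer.MAX_VALUE);
    }
}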
Example #26
Source File: NetworkEnvironment.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public void unregisterTask(Task task) {
    LOG.debug("Unregister task {} from network environment (state: {}).",
        task.getTaskInfo().getTaskNameWithSubtasks(), task.getExecutionState());

    final ExecutionAttemptID executionId = task.getExecutionId();

    synchronized (lock) {
        if (isShutdown) {
            // no need to do anything when we are not operational
            return;
        }

        if (task.isCanceledOrFailed()) {
            resultPartitionManager.releasePartitionsProducedBy(executionId, task.getFailureCause());
        }

        for (ResultPartition partition : task.getProducedPartitions()) {
            taskEventDispatcher.unregisterPartition(partition.getPartitionId());
            partition.destroyBufferPool();
        }

        final SingleInputGate[] inputGates = task.getAllInputGates();

        if (inputGates != null) {
            for (SingleInputGate gate : inputGates) {
                try {
                    if (gate != null) {
                        gate.releaseAllResources();
                    }
                } catch (IOException e) {
                    LOG.error("Error during release of reader resources: " + e.getMessage(), e);
                }
            }
        }
    }
}