org.apache.flink.metrics.MetricGroup Java Examples

The following examples show how to use org.apache.flink.metrics.MetricGroup. Each example is taken from an open-source project; the source file and license are noted above each snippet.
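Before the individual examples, here is a minimal sketch of the most common way user code obtains a MetricGroup: through the runtime context of a rich function. This sketch is not taken from any of the projects below, and the class, group, and metric names (CountingMapper, "myGroup", "eventsSeen") are illustrative assumptions.

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.MetricGroup;

public class CountingMapper extends RichMapFunction<String, String> {

	// registered in open(); metric objects are not serializable, hence transient
	private transient Counter eventsSeen;

	@Override
	public void open(Configuration parameters) throws Exception {
		// addGroup returns an existing sub-group with the same name or creates a new one
		MetricGroup group = getRuntimeContext().getMetricGroup().addGroup("myGroup");
		this.eventsSeen = group.counter("eventsSeen");
	}

	@Override
	public String map(String value) {
		eventsSeen.inc();
		return value;
	}
}

Reporters such as the AbstractReporter, JMXReporter, and DatadogHttpReporter examples below then receive each registered metric, together with its MetricGroup, through notifyOfAddedMetric.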
Example #1
Source File: AbstractReporter.java    From Flink-CEPplus with Apache License 2.0
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
	final String name = group.getMetricIdentifier(metricName, this);

	synchronized (this) {
		if (metric instanceof Counter) {
			counters.put((Counter) metric, name);
		} else if (metric instanceof Gauge) {
			gauges.put((Gauge<?>) metric, name);
		} else if (metric instanceof Histogram) {
			histograms.put((Histogram) metric, name);
		} else if (metric instanceof Meter) {
			meters.put((Meter) metric, name);
		} else {
			log.warn("Cannot add unknown metric type {}. This indicates that the reporter " +
				"does not support this metric type.", metric.getClass().getName());
		}
	}
}
 
Example #2
Source File: MetricGroupTest.java    From flink with Apache License 2.0
/**
 * Verifies that existing key/value groups are returned when calling {@link MetricGroup#addGroup(String)}.
 */
@Test
public void testNameCollisionAfterKeyValueGroup() {
	MetricRegistry registry = NoOpMetricRegistry.INSTANCE;
	GenericMetricGroup root = new GenericMetricGroup(registry, new DummyAbstractMetricGroup(registry), "root");

	String key = "key";
	String value = "value";

	root.addGroup(key, value);
	MetricGroup group = root.addGroup(key).addGroup(value);

	String variableValue = group.getAllVariables().get(ScopeFormat.asVariable("key"));
	assertEquals(value, variableValue);

	String identifier = group.getMetricIdentifier("metric");
	assertTrue("Key is missing from metric identifier.", identifier.contains("key"));
	assertTrue("Value is missing from metric identifier.", identifier.contains("value"));

	String logicalScope = ((AbstractMetricGroup) group).getLogicalScope(new DummyCharacterFilter());
	assertTrue("Key is missing from logical scope.", logicalScope.contains(key));
	assertFalse("Value is present in logical scope.", logicalScope.contains(value));
}
 
Example #3
Source File: SystemResourcesMetricsInitializer.java    From flink with Apache License 2.0
private static void instantiateCPUMetrics(MetricGroup metrics, SystemResourcesCounter usageCounter) {
	metrics.<Double, Gauge<Double>>gauge("Usage", usageCounter::getCpuUsage);
	metrics.<Double, Gauge<Double>>gauge("Idle", usageCounter::getCpuIdle);
	metrics.<Double, Gauge<Double>>gauge("Sys", usageCounter::getCpuSys);
	metrics.<Double, Gauge<Double>>gauge("User", usageCounter::getCpuUser);
	metrics.<Double, Gauge<Double>>gauge("IOWait", usageCounter::getIOWait);
	metrics.<Double, Gauge<Double>>gauge("Nice", usageCounter::getCpuNice);
	metrics.<Double, Gauge<Double>>gauge("Irq", usageCounter::getCpuIrq);
	metrics.<Double, Gauge<Double>>gauge("SoftIrq", usageCounter::getCpuSoftIrq);

	metrics.<Double, Gauge<Double>>gauge("Load1min", usageCounter::getCpuLoad1);
	metrics.<Double, Gauge<Double>>gauge("Load5min", usageCounter::getCpuLoad5);
	metrics.<Double, Gauge<Double>>gauge("Load15min", usageCounter::getCpuLoad15);

	for (int i = 0; i < usageCounter.getProcessorsCount(); i++) {
		final int processor = i;
		metrics.<Double, Gauge<Double>>gauge(
			String.format("UsageCPU%d", processor),
			() -> usageCounter.getCpuUsagePerProcessor(processor));
	}
}
 
Example #4
Source File: MetricGroupTest.java    From flink with Apache License 2.0
/**
 * Verifies the basic behavior when defining user-defined variables.
 */
@Test
public void testUserDefinedVariable() {
	MetricRegistry registry = NoOpMetricRegistry.INSTANCE;
	GenericMetricGroup root = new GenericMetricGroup(registry, new DummyAbstractMetricGroup(registry), "root");

	String key = "key";
	String value = "value";
	MetricGroup group = root.addGroup(key, value);

	String variableValue = group.getAllVariables().get(ScopeFormat.asVariable("key"));
	assertEquals(value, variableValue);

	String identifier = group.getMetricIdentifier("metric");
	assertTrue("Key is missing from metric identifier.", identifier.contains("key"));
	assertTrue("Value is missing from metric identifier.", identifier.contains("value"));

	String logicalScope = ((AbstractMetricGroup) group).getLogicalScope(new DummyCharacterFilter());
	assertTrue("Key is missing from logical scope.", logicalScope.contains(key));
	assertFalse("Value is present in logical scope.", logicalScope.contains(value));
}
 
Example #5
Source File: FlinkKafkaConsumerBaseTest.java    From flink with Apache License 2.0
protected TestingFetcher(
		SourceFunction.SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> seedPartitionsWithInitialOffsets,
		SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
		ProcessingTimeService processingTimeProvider,
		long autoWatermarkInterval,
		ClassLoader userCodeClassLoader,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {
	super(
			sourceContext,
			seedPartitionsWithInitialOffsets,
			watermarkStrategy,
			processingTimeProvider,
			autoWatermarkInterval,
			userCodeClassLoader,
			consumerMetricGroup,
			useMetrics);
}
 
Example #6
Source File: MockStateBackend.java    From flink with Apache License 2.0
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
	Environment env,
	JobID jobID,
	String operatorIdentifier,
	TypeSerializer<K> keySerializer,
	int numberOfKeyGroups,
	KeyGroupRange keyGroupRange,
	TaskKvStateRegistry kvStateRegistry,
	TtlTimeProvider ttlTimeProvider,
	MetricGroup metricGroup,
	@Nonnull Collection<KeyedStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) {
	return new MockKeyedStateBackendBuilder<>(
		new KvStateRegistry().createTaskRegistry(jobID, new JobVertexID()),
		keySerializer,
		env.getUserClassLoader(),
		numberOfKeyGroups,
		keyGroupRange,
		env.getExecutionConfig(),
		ttlTimeProvider,
		stateHandles,
		AbstractStateBackend.getCompressionDecorator(env.getExecutionConfig()),
		cancelStreamRegistry).build();
}
 
Example #7
Source File: NettyShuffleMetricFactory.java    From flink with Apache License 2.0
private static void registerInputMetrics(
		boolean isDetailedMetrics,
		MetricGroup inputGroup,
		MetricGroup buffersGroup,
		SingleInputGate[] inputGates) {
	if (isDetailedMetrics) {
		InputGateMetrics.registerQueueLengthMetrics(inputGroup, inputGates);
	}

	buffersGroup.gauge(METRIC_INPUT_QUEUE_LENGTH, new InputBuffersGauge(inputGates));

	FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
	ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
	CreditBasedInputBuffersUsageGauge creditBasedInputBuffersUsageGauge = new CreditBasedInputBuffersUsageGauge(
		floatingBuffersUsageGauge,
		exclusiveBuffersUsageGauge,
		inputGates);
	buffersGroup.gauge(METRIC_INPUT_EXCLUSIVE_BUFFERS_USAGE, exclusiveBuffersUsageGauge);
	buffersGroup.gauge(METRIC_INPUT_FLOATING_BUFFERS_USAGE, floatingBuffersUsageGauge);
	buffersGroup.gauge(METRIC_INPUT_POOL_USAGE, creditBasedInputBuffersUsageGauge);
}
 
Example #8
Source File: MetricUtils.java    From Flink-CEPplus with Apache License 2.0
public static JobManagerMetricGroup instantiateJobManagerMetricGroup(
		final MetricRegistry metricRegistry,
		final String hostname,
		final Optional<Time> systemResourceProbeInterval) {
	final JobManagerMetricGroup jobManagerMetricGroup = new JobManagerMetricGroup(
		metricRegistry,
		hostname);

	MetricGroup statusGroup = jobManagerMetricGroup.addGroup(METRIC_GROUP_STATUS_NAME);

	// initialize the JM metrics
	instantiateStatusMetrics(statusGroup);

	if (systemResourceProbeInterval.isPresent()) {
		instantiateSystemMetrics(jobManagerMetricGroup, systemResourceProbeInterval.get());
	}
	return jobManagerMetricGroup;
}
 
Example #9
Source File: JMXReporter.java    From flink with Apache License 2.0
@Override
public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {
	try {
		synchronized (this) {
			final ObjectName jmxName = registeredMetrics.remove(metric);

			// remove the metric if it is known. if it is not known, ignore the request
			if (jmxName != null) {
				mBeanServer.unregisterMBean(jmxName);
			}
		}
	} catch (InstanceNotFoundException e) {
		// alright then
	} catch (Throwable t) {
		// never propagate exceptions - the metrics reporter should not affect the stability
		// of the running system
		LOG.error("Un-registering metric failed", t);
	}
}
 
Example #10
Source File: MetricGroupRegistrationTest.java    From flink with Apache License 2.0
/**
 * Verifies that when attempting to create a group with the name of an existing one, the existing group is returned instead.
 */
@Test
public void testDuplicateGroupName() throws Exception {
	Configuration config = new Configuration();

	MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(config));

	MetricGroup root = new TaskManagerMetricGroup(registry, "host", "id");

	MetricGroup group1 = root.addGroup("group");
	MetricGroup group2 = root.addGroup("group");
	MetricGroup group3 = root.addGroup("group");
	Assert.assertTrue(group1 == group2 && group2 == group3);

	registry.shutdown().get();
}
 
Example #11
Source File: DatadogHttpReporter.java    From flink with Apache License 2.0
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
	final String name = group.getMetricIdentifier(metricName);

	List<String> tags = new ArrayList<>(configTags);
	tags.addAll(getTagsFromMetricGroup(group));
	String host = getHostFromMetricGroup(group);

	if (metric instanceof Counter) {
		Counter c = (Counter) metric;
		counters.put(c, new DCounter(c, name, host, tags));
	} else if (metric instanceof Gauge) {
		Gauge g = (Gauge) metric;
		gauges.put(g, new DGauge(g, name, host, tags));
	} else if (metric instanceof Meter) {
		Meter m = (Meter) metric;
		// Only consider rate
		meters.put(m, new DMeter(m, name, host, tags));
	} else if (metric instanceof Histogram) {
		LOGGER.warn("Cannot add {} because Datadog HTTP API doesn't support Histogram", metricName);
	} else {
		LOGGER.warn("Cannot add unknown metric type {}. This indicates that the reporter " +
			"does not support this metric type.", metric.getClass().getName());
	}
}
 
Example #12
Source File: CollectionExecutor.java    From flink with Apache License 2.0
private <IN> void executeDataSink(GenericDataSinkBase<?> sink, int superStep) throws Exception {
	Operator<?> inputOp = sink.getInput();
	if (inputOp == null) {
		throw new InvalidProgramException("The data sink " + sink.getName() + " has no input.");
	}
	
	@SuppressWarnings("unchecked")
	List<IN> input = (List<IN>) execute(inputOp);
	
	@SuppressWarnings("unchecked")
	GenericDataSinkBase<IN> typedSink = (GenericDataSinkBase<IN>) sink;

	// build the runtime context and compute broadcast variables, if necessary
	TaskInfo taskInfo = new TaskInfo(typedSink.getName(), 1, 0, 1, 0);
	RuntimeUDFContext ctx;

	MetricGroup metrics = new UnregisteredMetricsGroup();
		
	if (RichOutputFormat.class.isAssignableFrom(typedSink.getUserCodeWrapper().getUserCodeClass())) {
		ctx = superStep == 0 ? new RuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics) :
				new IterationRuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics);
	} else {
		ctx = null;
	}

	typedSink.executeOnCollections(input, ctx, executionConfig);
}
 
Example #13
Source File: AbstractRuntimeUDFContext.java    From flink with Apache License 2.0
public AbstractRuntimeUDFContext(TaskInfo taskInfo,
									ClassLoader userCodeClassLoader,
									ExecutionConfig executionConfig,
									Map<String, Accumulator<?, ?>> accumulators,
									Map<String, Future<Path>> cpTasks,
									MetricGroup metrics) {
	this.taskInfo = checkNotNull(taskInfo);
	this.userCodeClassLoader = userCodeClassLoader;
	this.executionConfig = executionConfig;
	this.distributedCache = new DistributedCache(checkNotNull(cpTasks));
	this.accumulators = checkNotNull(accumulators);
	this.metrics = metrics;
}
 
Example #14
Source File: MetricRegistryImplTest.java    From Flink-CEPplus with Apache License 2.0
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
	String expectedMetric = "A" + expectedDelimiter + "B" + expectedDelimiter + "C";
	assertEquals(expectedMetric, group.getMetricIdentifier(metricName, this));
	assertEquals(expectedMetric, group.getMetricIdentifier(metricName));
	numCorrectDelimitersForRegister++;
}
 
Example #15
Source File: RocksDBNativeMetricMonitor.java    From flink with Apache License 2.0
public RocksDBNativeMetricMonitor(
	@Nonnull RocksDBNativeMetricOptions options,
	@Nonnull MetricGroup metricGroup,
	@Nonnull RocksDB rocksDB
) {
	this.options = options;
	this.metricGroup = metricGroup;
	this.rocksDB = rocksDB;

	this.lock = new Object();
}
 
Example #16
Source File: LatencyStats.java    From flink with Apache License 2.0
@Override
MetricGroup createSourceMetricGroups(
		MetricGroup base,
		LatencyMarker marker,
		OperatorID operatorId,
		int operatorSubtaskIndex) {
	return base;
}
 
Example #17
Source File: CollectionExecutor.java    From Flink-CEPplus with Apache License 2.0
private <IN1, IN2, OUT> List<OUT> executeBinaryOperator(DualInputOperator<?, ?, ?, ?> operator, int superStep) throws Exception {
	Operator<?> inputOp1 = operator.getFirstInput();
	Operator<?> inputOp2 = operator.getSecondInput();
	
	if (inputOp1 == null) {
		throw new InvalidProgramException("The binary operation " + operator.getName() + " has no first input.");
	}
	if (inputOp2 == null) {
		throw new InvalidProgramException("The binary operation " + operator.getName() + " has no second input.");
	}
	
	// compute inputs
	@SuppressWarnings("unchecked")
	List<IN1> inputData1 = (List<IN1>) execute(inputOp1, superStep);
	@SuppressWarnings("unchecked")
	List<IN2> inputData2 = (List<IN2>) execute(inputOp2, superStep);
	
	@SuppressWarnings("unchecked")
	DualInputOperator<IN1, IN2, OUT, ?> typedOp = (DualInputOperator<IN1, IN2, OUT, ?>) operator;
	
	// build the runtime context and compute broadcast variables, if necessary
	TaskInfo taskInfo = new TaskInfo(typedOp.getName(), 1, 0, 1, 0);
	RuntimeUDFContext ctx;

	MetricGroup metrics = new UnregisteredMetricsGroup();

	if (RichFunction.class.isAssignableFrom(typedOp.getUserCodeWrapper().getUserCodeClass())) {
		ctx = superStep == 0 ? new RuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics) :
			new IterationRuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics);
		
		for (Map.Entry<String, Operator<?>> bcInputs : operator.getBroadcastInputs().entrySet()) {
			List<?> bcData = execute(bcInputs.getValue());
			ctx.setBroadcastVariable(bcInputs.getKey(), bcData);
		}
	} else {
		ctx = null;
	}

	return typedOp.executeOnCollections(inputData1, inputData2, ctx, executionConfig);
}
 
Example #18
Source File: KafkaConsumerThread.java    From Flink-CEPplus with Apache License 2.0
public KafkaConsumerThread(
		Logger log,
		Handover handover,
		Properties kafkaProperties,
		ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
		KafkaConsumerCallBridge09 consumerCallBridge,
		String threadName,
		long pollTimeout,
		boolean useMetrics,
		MetricGroup consumerMetricGroup,
		MetricGroup subtaskMetricGroup,
		FlinkConnectorRateLimiter rateLimiter) {

	super(threadName);
	setDaemon(true);

	this.log = checkNotNull(log);
	this.handover = checkNotNull(handover);
	this.kafkaProperties = checkNotNull(kafkaProperties);
	this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
	this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
	this.consumerCallBridge = checkNotNull(consumerCallBridge);

	this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

	this.pollTimeout = pollTimeout;
	this.useMetrics = useMetrics;

	this.consumerReassignmentLock = new Object();
	this.nextOffsetsToCommit = new AtomicReference<>();
	this.running = true;

	if (rateLimiter != null) {
		this.rateLimiter = rateLimiter;
	}
}
 
Example #19
Source File: AbstractMetricGroupTest.java    From flink with Apache License 2.0
@Override
public void checkScopes(Metric metric, String metricName, MetricGroup group) {
	// the first call determines which filter is applied to all future calls
	assertEquals("A!B!X!D!1", group.getMetricIdentifier(metricName, this));
	// from now on the scope string is cached and should not be reliant on the given filter
	assertEquals("A!B!X!D!1", group.getMetricIdentifier(metricName));
	assertEquals("A!B!X!D!1", group.getMetricIdentifier(metricName, FILTER_C));
	// the metric name however is still affected by the filter as it is not cached
	assertEquals("A!B!X!D!3", group.getMetricIdentifier(metricName, new CharacterFilter() {
		@Override
		public String filterCharacters(String input) {
			return input.replace("A", "X").replace("1", "3");
		}
	}));
}
 
Example #20
Source File: KafkaShuffleFetcher.java    From flink with Apache License 2.0
public KafkaShuffleFetcher(
		SourceFunction.SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
		ProcessingTimeService processingTimeProvider,
		long autoWatermarkInterval,
		ClassLoader userCodeClassLoader,
		String taskNameWithSubtasks,
		KafkaDeserializationSchema<T> deserializer,
		Properties kafkaProperties,
		long pollTimeout,
		MetricGroup subtaskMetricGroup,
		MetricGroup consumerMetricGroup,
		boolean useMetrics,
		TypeSerializer<T> typeSerializer,
		int producerParallelism) throws Exception {
	super(
		sourceContext,
		assignedPartitionsWithInitialOffsets,
		watermarkStrategy,
		processingTimeProvider,
		autoWatermarkInterval,
		userCodeClassLoader,
		taskNameWithSubtasks,
		deserializer,
		kafkaProperties,
		pollTimeout,
		subtaskMetricGroup,
		consumerMetricGroup,
		useMetrics);

	this.kafkaShuffleDeserializer = new KafkaShuffleElementDeserializer<>(typeSerializer);
	this.watermarkHandler = new WatermarkHandler(producerParallelism);
}
 
Example #21
Source File: TaskIOMetricGroup.java    From Flink-CEPplus with Apache License 2.0
/**
 * Initialize Buffer Metrics for a task.
 */
public void initializeBufferMetrics(Task task) {
	final MetricGroup buffers = addGroup("buffers");
	buffers.gauge("inputQueueLength", new InputBuffersGauge(task));
	buffers.gauge("outputQueueLength", new OutputBuffersGauge(task));
	buffers.gauge("inPoolUsage", new InputBufferPoolUsageGauge(task));
	buffers.gauge("outPoolUsage", new OutputBufferPoolUsageGauge(task));
}
 
Example #22
Source File: FeedbackSinkOperator.java    From flink-statefun with Apache License 2.0
@Override
public void open() throws Exception {
  super.open();
  final int indexOfThisSubtask = getRuntimeContext().getIndexOfThisSubtask();
  final SubtaskFeedbackKey<V> key = this.key.withSubTaskIndex(indexOfThisSubtask);

  FeedbackChannelBroker broker = FeedbackChannelBroker.get();
  this.channel = broker.getChannel(key);

  // metrics
  MetricGroup metrics = getRuntimeContext().getMetricGroup();
  SimpleCounter produced = metrics.counter("produced", new SimpleCounter());
  metrics.meter("producedRate", new MeterView(produced, 60));
  this.totalProduced = produced;
}
 
Example #23
Source File: SystemResourcesMetricsInitializer.java    From Flink-CEPplus with Apache License 2.0
private static void instantiateNetworkMetrics(MetricGroup metrics, SystemResourcesCounter usageCounter) {
	for (int i = 0; i < usageCounter.getNetworkInterfaceNames().length; i++) {
		MetricGroup interfaceGroup = metrics.addGroup(usageCounter.getNetworkInterfaceNames()[i]);

		final int interfaceNo = i;
		interfaceGroup.<Long, Gauge<Long>>gauge("ReceiveRate", () -> usageCounter.getReceiveRatePerInterface(interfaceNo));
		interfaceGroup.<Long, Gauge<Long>>gauge("SendRate", () -> usageCounter.getSendRatePerInterface(interfaceNo));
	}
}
 
Example #24
Source File: MetricUtils.java    From Flink-CEPplus with Apache License 2.0
private static void instantiateNetworkMetrics(
	MetricGroup metrics,
	final NetworkEnvironment network) {

	final NetworkBufferPool networkBufferPool = network.getNetworkBufferPool();
	metrics.<Integer, Gauge<Integer>>gauge("TotalMemorySegments", networkBufferPool::getTotalNumberOfMemorySegments);
	metrics.<Integer, Gauge<Integer>>gauge("AvailableMemorySegments", networkBufferPool::getNumberOfAvailableMemorySegments);
}
 
Example #25
Source File: CollectionExecutor.java    From flink with Apache License 2.0
private <IN1, IN2, OUT> List<OUT> executeBinaryOperator(DualInputOperator<?, ?, ?, ?> operator, int superStep) throws Exception {
	Operator<?> inputOp1 = operator.getFirstInput();
	Operator<?> inputOp2 = operator.getSecondInput();
	
	if (inputOp1 == null) {
		throw new InvalidProgramException("The binary operation " + operator.getName() + " has no first input.");
	}
	if (inputOp2 == null) {
		throw new InvalidProgramException("The binary operation " + operator.getName() + " has no second input.");
	}
	
	// compute inputs
	@SuppressWarnings("unchecked")
	List<IN1> inputData1 = (List<IN1>) execute(inputOp1, superStep);
	@SuppressWarnings("unchecked")
	List<IN2> inputData2 = (List<IN2>) execute(inputOp2, superStep);
	
	@SuppressWarnings("unchecked")
	DualInputOperator<IN1, IN2, OUT, ?> typedOp = (DualInputOperator<IN1, IN2, OUT, ?>) operator;
	
	// build the runtime context and compute broadcast variables, if necessary
	TaskInfo taskInfo = new TaskInfo(typedOp.getName(), 1, 0, 1, 0);
	RuntimeUDFContext ctx;

	MetricGroup metrics = new UnregisteredMetricsGroup();

	if (RichFunction.class.isAssignableFrom(typedOp.getUserCodeWrapper().getUserCodeClass())) {
		ctx = superStep == 0 ? new RuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics) :
			new IterationRuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics);
		
		for (Map.Entry<String, Operator<?>> bcInputs : operator.getBroadcastInputs().entrySet()) {
			List<?> bcData = execute(bcInputs.getValue());
			ctx.setBroadcastVariable(bcInputs.getKey(), bcData);
		}
	} else {
		ctx = null;
	}

	return typedOp.executeOnCollections(inputData1, inputData2, ctx, executionConfig);
}
 
Example #26
Source File: ResultPartitionMetrics.java    From flink with Apache License 2.0
public static void registerQueueLengthMetrics(MetricGroup parent, ResultPartition[] partitions) {
	for (int i = 0; i < partitions.length; i++) {
		ResultPartitionMetrics metrics = new ResultPartitionMetrics(partitions[i]);

		MetricGroup group = parent.addGroup(i);
		group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
		group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
		group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
		group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
	}
}
 
Example #27
Source File: InputGateMetrics.java    From Flink-CEPplus with Apache License 2.0
public static void registerQueueLengthMetrics(MetricGroup group, SingleInputGate gate) {
	InputGateMetrics metrics = new InputGateMetrics(gate);

	group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
	group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
	group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
	group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
}
 
Example #28
Source File: MemoryStateBackend.java    From Flink-CEPplus with Apache License 2.0
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
	Environment env,
	JobID jobID,
	String operatorIdentifier,
	TypeSerializer<K> keySerializer,
	int numberOfKeyGroups,
	KeyGroupRange keyGroupRange,
	TaskKvStateRegistry kvStateRegistry,
	TtlTimeProvider ttlTimeProvider,
	MetricGroup metricGroup,
	@Nonnull Collection<KeyedStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws BackendBuildingException {

	TaskStateManager taskStateManager = env.getTaskStateManager();
	HeapPriorityQueueSetFactory priorityQueueSetFactory =
		new HeapPriorityQueueSetFactory(keyGroupRange, numberOfKeyGroups, 128);
	return new HeapKeyedStateBackendBuilder<>(
		kvStateRegistry,
		keySerializer,
		env.getUserClassLoader(),
		numberOfKeyGroups,
		keyGroupRange,
		env.getExecutionConfig(),
		ttlTimeProvider,
		stateHandles,
		AbstractStateBackend.getCompressionDecorator(env.getExecutionConfig()),
		taskStateManager.createLocalRecoveryConfig(),
		priorityQueueSetFactory,
		isUsingAsynchronousSnapshots(),
		cancelStreamRegistry).build();
}
 
Example #29
Source File: AbstractStateBackend.java    From Flink-CEPplus with Apache License 2.0
@Override
public abstract <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
	Environment env,
	JobID jobID,
	String operatorIdentifier,
	TypeSerializer<K> keySerializer,
	int numberOfKeyGroups,
	KeyGroupRange keyGroupRange,
	TaskKvStateRegistry kvStateRegistry,
	TtlTimeProvider ttlTimeProvider,
	MetricGroup metricGroup,
	@Nonnull Collection<KeyedStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws IOException;