org.apache.flink.util.Preconditions Java Examples

The following examples show how to use org.apache.flink.util.Preconditions. The examples are extracted from open source projects; the source file and license are noted above each one.
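Before diving into the examples, here is a minimal, self-contained sketch (written for this page, not taken from any of the projects below) of the three checks the examples rely on: checkNotNull, which returns its argument and throws NullPointerException; checkArgument, which throws IllegalArgumentException; and checkState, which throws IllegalStateException. The class and field names are made up for illustration.

import org.apache.flink.util.Preconditions;

public class PreconditionsOverview {

	private final String name;
	private boolean started;

	public PreconditionsOverview(String name, int parallelism) {
		// checkNotNull returns its argument, so it can be used inline in field assignments.
		this.name = Preconditions.checkNotNull(name, "name must not be null");

		// checkArgument validates caller-supplied values; the template form
		// substitutes %s placeholders only when the check fails.
		Preconditions.checkArgument(parallelism > 0, "Parallelism must be positive, was %s.", parallelism);
	}

	public void start() {
		// checkState validates the object's own state rather than an argument.
		Preconditions.checkState(!started, "%s is already running.", name);
		started = true;
	}
}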
Example #1
Source File: CollectSinkFunction.java    From flink with Apache License 2.0
private InetAddress getBindAddress() {
	RuntimeContext context = getRuntimeContext();
	Preconditions.checkState(
		context instanceof StreamingRuntimeContext,
		"CollectSinkFunction can only be used in StreamTask");
	StreamingRuntimeContext streamingContext = (StreamingRuntimeContext) context;
	String bindAddress = streamingContext.getTaskManagerRuntimeInfo().getTaskManagerBindAddress();

	if (bindAddress != null) {
		try {
			return InetAddress.getByName(bindAddress);
		} catch (UnknownHostException e) {
			return null;
		}
	}
	return null;
}
 
Example #2
Source File: HeartbeatManagerImpl.java    From Flink-CEPplus with Apache License 2.0
public HeartbeatManagerImpl(
		long heartbeatTimeoutIntervalMs,
		ResourceID ownResourceID,
		HeartbeatListener<I, O> heartbeatListener,
		ScheduledExecutor mainThreadExecutor,
		Logger log) {
	Preconditions.checkArgument(heartbeatTimeoutIntervalMs > 0L, "The heartbeat timeout has to be larger than 0.");

	this.heartbeatTimeoutIntervalMs = heartbeatTimeoutIntervalMs;
	this.ownResourceID = Preconditions.checkNotNull(ownResourceID);
	this.heartbeatListener = Preconditions.checkNotNull(heartbeatListener, "heartbeatListener");
	this.mainThreadExecutor = Preconditions.checkNotNull(mainThreadExecutor);
	this.log = Preconditions.checkNotNull(log);
	this.heartbeatTargets = new ConcurrentHashMap<>(16);

	stopped = false;
}
 
Example #3
Source File: JobDetailsHandler.java    From flink with Apache License 2.0
public JobDetailsHandler(
		GatewayRetriever<? extends RestfulGateway> leaderRetriever,
		Time timeout,
		Map<String, String> responseHeaders,
		MessageHeaders<EmptyRequestBody, JobDetailsInfo, JobMessageParameters> messageHeaders,
		ExecutionGraphCache executionGraphCache,
		Executor executor,
		MetricFetcher metricFetcher) {
	super(
		leaderRetriever,
		timeout,
		responseHeaders,
		messageHeaders,
		executionGraphCache,
		executor);

	this.metricFetcher = Preconditions.checkNotNull(metricFetcher);
}
 
Example #4
Source File: StreamExecutionEnvironment.java    From flink with Apache License 2.0
/**
 * Creates a data stream from the given non-empty collection. The type of the data stream is that of the
 * elements in the collection.
 *
 * <p>The framework will try and determine the exact type from the collection elements. In case of generic
 * elements, it may be necessary to manually supply the type information via
 * {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
 *
 * <p>Note that this operation will result in a non-parallel data stream source, i.e. a data stream source with
 * parallelism one.
 *
 * @param data
 * 		The collection of elements to create the data stream from.
 * @param <OUT>
 *     The generic type of the returned data stream.
 * @return
 *     The data stream representing the given collection
 */
public <OUT> DataStreamSource<OUT> fromCollection(Collection<OUT> data) {
	Preconditions.checkNotNull(data, "Collection must not be null");
	if (data.isEmpty()) {
		throw new IllegalArgumentException("Collection must not be empty");
	}

	OUT first = data.iterator().next();
	if (first == null) {
		throw new IllegalArgumentException("Collection must not contain null elements");
	}

	TypeInformation<OUT> typeInfo;
	try {
		typeInfo = TypeExtractor.getForObject(first);
	}
	catch (Exception e) {
		throw new RuntimeException("Could not create TypeInformation for type " + first.getClass()
				+ "; please specify the TypeInformation manually via "
				+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)", e);
	}
	return fromCollection(data, typeInfo);
}
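As a usage sketch (hypothetical, not part of the Flink source above): for a concrete element type the one-argument overload suffices, while generic element types need the explicit TypeInformation overload to avoid the RuntimeException thrown in the catch block.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Concrete element type: TypeExtractor can infer the type from the first element.
DataStreamSource<Integer> numbers = env.fromCollection(Arrays.asList(1, 2, 3));

// Generic element type: supply the TypeInformation explicitly.
DataStreamSource<Tuple2<String, Integer>> pairs = env.fromCollection(
	Arrays.asList(Tuple2.of("a", 1), Tuple2.of("b", 2)),
	TypeInformation.of(new TypeHint<Tuple2<String, Integer>>() {}));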
 
Example #5
Source File: CheckpointCoordinatorConfiguration.java    From flink with Apache License 2.0
public CheckpointCoordinatorConfiguration(
		long checkpointInterval,
		long checkpointTimeout,
		long minPauseBetweenCheckpoints,
		int maxConcurrentCheckpoints,
		CheckpointRetentionPolicy checkpointRetentionPolicy,
		boolean isExactlyOnce,
		boolean isPreferCheckpointForRecovery,
		int tolerableCpFailureNumber) {

	// sanity checks
	if (checkpointInterval < MINIMAL_CHECKPOINT_TIME || checkpointTimeout < MINIMAL_CHECKPOINT_TIME ||
		minPauseBetweenCheckpoints < 0 || maxConcurrentCheckpoints < 1 ||
		tolerableCpFailureNumber < 0) {
		throw new IllegalArgumentException();
	}

	this.checkpointInterval = checkpointInterval;
	this.checkpointTimeout = checkpointTimeout;
	this.minPauseBetweenCheckpoints = minPauseBetweenCheckpoints;
	this.maxConcurrentCheckpoints = maxConcurrentCheckpoints;
	this.checkpointRetentionPolicy = Preconditions.checkNotNull(checkpointRetentionPolicy);
	this.isExactlyOnce = isExactlyOnce;
	this.isPreferCheckpointForRecovery = isPreferCheckpointForRecovery;
	this.tolerableCheckpointFailureNumber = tolerableCpFailureNumber;
}
 
Example #6
Source File: AbstractHeapState.java    From Flink-CEPplus with Apache License 2.0
@Override
public byte[] getSerializedValue(
		final byte[] serializedKeyAndNamespace,
		final TypeSerializer<K> safeKeySerializer,
		final TypeSerializer<N> safeNamespaceSerializer,
		final TypeSerializer<SV> safeValueSerializer) throws Exception {

	Preconditions.checkNotNull(serializedKeyAndNamespace);
	Preconditions.checkNotNull(safeKeySerializer);
	Preconditions.checkNotNull(safeNamespaceSerializer);
	Preconditions.checkNotNull(safeValueSerializer);

	Tuple2<K, N> keyAndNamespace = KvStateSerializer.deserializeKeyAndNamespace(
			serializedKeyAndNamespace, safeKeySerializer, safeNamespaceSerializer);

	SV result = stateTable.get(keyAndNamespace.f0, keyAndNamespace.f1);

	if (result == null) {
		return null;
	}
	return KvStateSerializer.serializeValue(result, safeValueSerializer);
}
 
Example #7
Source File: OperatorState.java    From flink with Apache License 2.0
public void putState(int subtaskIndex, OperatorSubtaskState subtaskState) {
	Preconditions.checkNotNull(subtaskState);

	if (subtaskIndex < 0 || subtaskIndex >= parallelism) {
		throw new IndexOutOfBoundsException("The given sub task index " + subtaskIndex +
			" exceeds the maximum number of sub tasks " + operatorSubtaskStates.size());
	} else {
		operatorSubtaskStates.put(subtaskIndex, subtaskState);
	}
}
 
Example #8
Source File: FlinkPravegaReader.java    From flink-connectors with Apache License 2.0
protected PeriodicWatermarkEmitter(
        EventStreamReader<?> pravegaReader, SourceContext<?> ctx, ClassLoader userCodeClassLoader,
        ProcessingTimeService timerService) throws Exception {
    this.pravegaReader = Preconditions.checkNotNull(pravegaReader);
    this.stream = Stream.of(readerGroup.getStreamNames().iterator().next());
    this.ctx = Preconditions.checkNotNull(ctx);
    this.timerService = Preconditions.checkNotNull(timerService);
    this.lastWatermarkTimestamp = Long.MIN_VALUE;
    this.userAssigner = assignerWithTimeWindows.deserializeValue(userCodeClassLoader);
}
 
Example #9
Source File: StateReaderOperator.java    From flink with Apache License 2.0
protected StateReaderOperator(F function, TypeInformation<KEY> keyType, TypeSerializer<N> namespaceSerializer) {
	Preconditions.checkNotNull(function, "The user function must not be null");
	Preconditions.checkNotNull(keyType, "The key type must not be null");
	Preconditions.checkNotNull(namespaceSerializer, "The namespace serializer must not be null");

	this.function = function;
	this.keyType = keyType;
	this.namespaceSerializer = namespaceSerializer;
}
 
Example #10
Source File: FileCopyTaskInputFormat.java    From flink with Apache License 2.0
@Override
public void returnInputSplit(List<InputSplit> splits, int taskId) {
	synchronized (this.splits) {
		for (InputSplit split : splits) {
			Preconditions.checkState(this.splits.add((FileCopyTaskInputSplit) split));
		}
	}
}
 
Example #11
Source File: CompositeSerializer.java    From flink with Apache License 2.0
@Override
public T copy(T from, T reuse) {
	Preconditions.checkNotNull(from);
	Preconditions.checkNotNull(reuse);
	if (isImmutableType()) {
		return from;
	}
	Object[] fields = new Object[fieldSerializers.length];
	for (int index = 0; index < fieldSerializers.length; index++) {
		fields[index] = fieldSerializers[index].copy(getField(from, index), getField(reuse, index));
	}
	return createInstanceWithReuse(fields, reuse);
}
 
Example #12
Source File: FileUploads.java    From flink with Apache License 2.0
public FileUploads(@Nonnull Path uploadDirectory) {
	Preconditions.checkNotNull(uploadDirectory, "UploadDirectory must not be null.");
	Preconditions.checkArgument(Files.exists(uploadDirectory), "UploadDirectory does not exist.");
	Preconditions.checkArgument(Files.isDirectory(uploadDirectory), "UploadDirectory is not a directory.");
	Preconditions.checkArgument(uploadDirectory.isAbsolute(), "UploadDirectory is not absolute.");
	this.uploadDirectory = uploadDirectory;
}
 
Example #13
Source File: FlinkExecutableStageFunction.java    From beam with Apache License 2.0
private void processElements(Iterable<WindowedValue<InputT>> iterable, RemoteBundle bundle)
    throws Exception {
  Preconditions.checkArgument(bundle != null, "RemoteBundle must not be null");

  FnDataReceiver<WindowedValue<?>> mainReceiver =
      Iterables.getOnlyElement(bundle.getInputReceivers().values());
  for (WindowedValue<InputT> input : iterable) {
    mainReceiver.accept(input);
  }
}
 
Example #14
Source File: AbstractServerBase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Starts the server by binding to the configured bind address (blocking).
 * @throws Throwable If something goes wrong during the bind operation.
 */
public void start() throws Throwable {
	Preconditions.checkState(serverAddress == null && serverShutdownFuture.get() == null,
			serverName + " is already running @ " + serverAddress + ". ");

	Iterator<Integer> portIterator = bindPortRange.iterator();
	while (portIterator.hasNext() && !attemptToBind(portIterator.next())) {}

	if (serverAddress != null) {
		log.info("Started {} @ {}.", serverName, serverAddress);
	} else {
		log.info("Unable to start {}. All ports in provided range ({}) are occupied.", serverName, bindPortRange);
		throw new FlinkRuntimeException("Unable to start " + serverName + ". All ports in provided range are occupied.");
	}
}
 
Example #15
Source File: ScatterGatherIteration.java    From flink with Apache License 2.0
private ScatterGatherIteration(ScatterFunction<K, VV, Message, EV> sf,
		GatherFunction<K, VV, Message> gf,
		DataSet<Edge<K, EV>> edgesWithValue,
		int maximumNumberOfIterations) {
	Preconditions.checkNotNull(sf);
	Preconditions.checkNotNull(gf);
	Preconditions.checkNotNull(edgesWithValue);
	Preconditions.checkArgument(maximumNumberOfIterations > 0, "The maximum number of iterations must be at least one.");

	this.scatterFunction = sf;
	this.gatherFunction = gf;
	this.edgesWithValue = edgesWithValue;
	this.maximumNumberOfIterations = maximumNumberOfIterations;
	this.messageType = getMessageType(sf);
}
 
Example #16
Source File: AvroSchemaConverter.java    From Flink-CEPplus with Apache License 2.0
/**
 * Converts an Avro class into a nested row structure with deterministic field order and data
 * types that are compatible with Flink's Table & SQL API.
 *
 * @param avroClass Avro specific record that contains schema information
 * @return type information matching the schema
 */
@SuppressWarnings("unchecked")
public static <T extends SpecificRecord> TypeInformation<Row> convertToTypeInfo(Class<T> avroClass) {
	Preconditions.checkNotNull(avroClass, "Avro specific record class must not be null.");
	// determine schema to retrieve deterministic field order
	final Schema schema = SpecificData.get().getSchema(avroClass);
	return (TypeInformation<Row>) convertToTypeInfo(schema);
}
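A hypothetical usage sketch; User stands in for any Avro-generated SpecificRecord class and is made up for illustration:

// Derive a Row type with deterministic field order from the Avro schema.
TypeInformation<Row> typeInfo = AvroSchemaConverter.convertToTypeInfo(User.class);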
 
Example #17
Source File: InternalTimeServiceManager.java    From flink with Apache License 2.0
public void snapshotStateForKeyGroup(DataOutputView stream, int keyGroupIdx) throws IOException {
	Preconditions.checkState(useLegacySynchronousSnapshots);
	InternalTimerServiceSerializationProxy<K> serializationProxy =
		new InternalTimerServiceSerializationProxy<>(this, keyGroupIdx);

	serializationProxy.write(stream);
}
 
Example #18
Source File: StateDescriptor.java    From flink with Apache License 2.0
/**
 * Sets the name for queries of state created from this descriptor.
 *
 * <p>If a name is set, the created state will be published for queries
 * during runtime. The name needs to be unique per job. If there is another
 * state instance published under the same name, the job will fail during runtime.
 *
 * @param queryableStateName State name for queries (unique name per job)
 * @throws IllegalStateException If queryable state name already set
 */
public void setQueryable(String queryableStateName) {
	Preconditions.checkArgument(
		ttlConfig.getUpdateType() == StateTtlConfig.UpdateType.Disabled,
		"Queryable state is currently not supported with TTL");
	if (this.queryableStateName == null) {
		this.queryableStateName = Preconditions.checkNotNull(queryableStateName, "Registration name");
	} else {
		throw new IllegalStateException("Queryable state name already set");
	}
}
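A hypothetical usage sketch (the descriptor and query names are made up for illustration); note that the checkArgument above rejects descriptors with TTL enabled, and the query name must be unique per job:

ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("count", Long.class);

// Publish the state for external queries under the name "count-query".
descriptor.setQueryable("count-query");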
 
Example #19
Source File: TestStreamEnvironment.java    From flink with Apache License 2.0
public TestStreamEnvironment(
		JobExecutor jobExecutor,
		int parallelism,
		Collection<Path> jarFiles,
		Collection<URL> classPaths) {

	this.jobExecutor = Preconditions.checkNotNull(jobExecutor);
	this.jarFiles = Preconditions.checkNotNull(jarFiles);
	this.classPaths = Preconditions.checkNotNull(classPaths);
	getConfiguration().set(DeploymentOptions.TARGET, LocalExecutor.NAME);
	getConfiguration().set(DeploymentOptions.ATTACHED, true);

	setParallelism(parallelism);
}
 
Example #20
Source File: Transformation.java    From flink with Apache License 2.0
/**
 * Creates a new {@code Transformation} with the given name, output type and parallelism.
 *
 * @param name The name of the {@code Transformation}, this will be shown in Visualizations and the Log
 * @param outputType The output type of this {@code Transformation}
 * @param parallelism The parallelism of this {@code Transformation}
 */
public Transformation(String name, TypeInformation<T> outputType, int parallelism) {
	this.id = getNewNodeId();
	this.name = Preconditions.checkNotNull(name);
	this.outputType = outputType;
	this.parallelism = parallelism;
	this.slotSharingGroup = null;
}
 
Example #21
Source File: MetricRegistryImpl.java    From flink with Apache License 2.0
/**
 * Initializes the MetricQueryService.
 *
 * @param rpcService RpcService to create the MetricQueryService on
 * @param resourceID resource ID used to disambiguate the actor name
 */
public void startQueryService(RpcService rpcService, ResourceID resourceID) {
	synchronized (lock) {
		Preconditions.checkState(!isShutdown(), "The metric registry has already been shut down.");

		try {
			metricQueryServiceRpcService = rpcService;
			queryService = MetricQueryService.createMetricQueryService(rpcService, resourceID, maximumFramesize);
			queryService.start();
		} catch (Exception e) {
			LOG.warn("Could not start MetricDumpActor. No metrics will be submitted to the WebInterface.", e);
		}
	}
}
 
Example #22
Source File: CompositeSerializer.java    From flink with Apache License 2.0
@Override
public void serialize(T record, DataOutputView target) throws IOException {
	Preconditions.checkNotNull(record);
	Preconditions.checkNotNull(target);
	for (int index = 0; index < fieldSerializers.length; index++) {
		fieldSerializers[index].serialize(getField(record, index), target);
	}
}
 
Example #23
Source File: ParquetTableSource.java    From flink with Apache License 2.0
@Nullable
private FilterPredicate lessThanOrEqual(Expression exp, Tuple2<Column, Comparable> columnPair) {
	Preconditions.checkArgument(exp instanceof LessThanOrEqual, "exp has to be LessThanOrEqual");
	if (columnPair.f0 instanceof IntColumn) {
		return FilterApi.ltEq((IntColumn) columnPair.f0, (Integer) columnPair.f1);
	} else if (columnPair.f0 instanceof LongColumn) {
		return FilterApi.ltEq((LongColumn) columnPair.f0, (Long) columnPair.f1);
	} else if (columnPair.f0 instanceof DoubleColumn) {
		return FilterApi.ltEq((DoubleColumn) columnPair.f0, (Double) columnPair.f1);
	} else if (columnPair.f0 instanceof FloatColumn) {
		return FilterApi.ltEq((FloatColumn) columnPair.f0, (Float) columnPair.f1);
	}

	return null;
}
 
Example #24
Source File: CirculantGraph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Required configuration for each range of offsets in the graph.
 *
 * @param offset first offset appointing the vertices' position
 * @param length number of contiguous offsets in range
 * @return this
 */
public CirculantGraph addRange(long offset, long length) {
	Preconditions.checkArgument(offset >= MINIMUM_OFFSET,
		"Range offset must be at least " + MINIMUM_OFFSET);
	Preconditions.checkArgument(length <= vertexCount - offset,
		"Range length must not be greater than the vertex count minus the range offset.");

	offsetRanges.add(new OffsetRange(offset, length));

	return this;
}
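A hypothetical usage sketch, assuming the Gelly graph generator API: a circulant graph on five vertices where every vertex links to its neighbors at offsets 1 and 2.

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

// Each addRange call is validated by the checkArgument calls above.
Graph<LongValue, NullValue, NullValue> graph = new CirculantGraph(env, 5)
	.addRange(1, 2)
	.generate();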
 
Example #25
Source File: YarnApplicationStatusMonitor.java    From Flink-CEPplus with Apache License 2.0
public YarnApplicationStatusMonitor(
		YarnClient yarnClient,
		ApplicationId yarnApplicationId,
		ScheduledExecutor scheduledExecutor) {
	this.yarnClient = Preconditions.checkNotNull(yarnClient);
	this.yarnApplicationId = Preconditions.checkNotNull(yarnApplicationId);

	applicationStatusUpdateFuture = scheduledExecutor.scheduleWithFixedDelay(
		this::updateApplicationStatus,
		0L,
		UPDATE_INTERVAL,
		TimeUnit.MILLISECONDS);

	applicationStatus = ApplicationStatus.UNKNOWN;
}
 
Example #26
Source File: ElasticsearchUpsertTableSinkBase.java    From flink with Apache License 2.0
public ElasticsearchUpsertTableSinkBase(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions,
		RequestFactory requestFactory) {

	this.isAppendOnly = isAppendOnly;
	this.schema = TableSchemaUtils.checkNoGeneratedColumns(schema);
	this.hosts = Preconditions.checkNotNull(hosts);
	this.index = Preconditions.checkNotNull(index);
	this.keyDelimiter = Preconditions.checkNotNull(keyDelimiter);
	this.keyNullLiteral = Preconditions.checkNotNull(keyNullLiteral);
	this.docType = Preconditions.checkNotNull(docType);
	this.serializationSchema = Preconditions.checkNotNull(serializationSchema);
	this.contentType = Preconditions.checkNotNull(contentType);
	this.failureHandler = Preconditions.checkNotNull(failureHandler);
	this.sinkOptions = Preconditions.checkNotNull(sinkOptions);
	this.requestFactory = Preconditions.checkNotNull(requestFactory);
}
 
Example #27
Source File: TypeSerializerSnapshotSerializationUtil.java    From flink with Apache License 2.0
/**
 * Constructor for reading serializers.
 */
TypeSerializerSnapshotSerializationProxy(
	ClassLoader userCodeClassLoader,
	@Nullable TypeSerializer<T> existingPriorSerializer) {
	this.userCodeClassLoader = Preconditions.checkNotNull(userCodeClassLoader);
	this.serializer = existingPriorSerializer;
}
 
Example #28
Source File: HeapMapState.java    From flink with Apache License 2.0
/**
 * Creates a new key/value state for the given hash map of key/value pairs.
 *
 * @param stateTable The state table for which this state is associated to.
 * @param keySerializer The serializer for the keys.
 * @param valueSerializer The serializer for the state.
 * @param namespaceSerializer The serializer for the namespace.
 * @param defaultValue The default value for the state.
 */
private HeapMapState(
	StateTable<K, N, Map<UK, UV>> stateTable,
	TypeSerializer<K> keySerializer,
	TypeSerializer<Map<UK, UV>> valueSerializer,
	TypeSerializer<N> namespaceSerializer,
	Map<UK, UV> defaultValue) {
	super(stateTable, keySerializer, valueSerializer, namespaceSerializer, defaultValue);

	Preconditions.checkState(valueSerializer instanceof MapSerializer, "Unexpected serializer type.");
}
 
Example #29
Source File: TimerService.java    From flink with Apache License 2.0
public TimerService(
		final ScheduledExecutorService scheduledExecutorService,
		final long shutdownTimeout) {
	this.scheduledExecutorService = Preconditions.checkNotNull(scheduledExecutorService);

	Preconditions.checkArgument(shutdownTimeout >= 0L, "The shut down timeout must be larger than or equal to 0.");
	this.shutdownTimeout = shutdownTimeout;

	this.timeouts = new HashMap<>(16);
	this.timeoutListener = null;
}
 
Example #30
Source File: ZooKeeperLeaderElectionService.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a ZooKeeperLeaderElectionService object.
 *
 * @param client Client which is connected to the ZooKeeper quorum
 * @param latchPath ZooKeeper node path for the leader election latch
 * @param leaderPath ZooKeeper node path for the node which stores the current leader information
 */
public ZooKeeperLeaderElectionService(CuratorFramework client, String latchPath, String leaderPath) {
	this.client = Preconditions.checkNotNull(client, "CuratorFramework client");
	this.leaderPath = Preconditions.checkNotNull(leaderPath, "leaderPath");

	leaderLatch = new LeaderLatch(client, latchPath);
	cache = new NodeCache(client, leaderPath);

	issuedLeaderSessionID = null;
	confirmedLeaderSessionID = null;
	leaderContender = null;

	running = false;
}