Java Code Examples for org.apache.flink.annotation.VisibleForTesting
The following examples show how to use the org.apache.flink.annotation.VisibleForTesting annotation. All examples are extracted from open source projects.
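Before diving into the examples, a quick note on what the annotation means: in Flink, @VisibleForTesting documents that a member has been given wider visibility than its role strictly requires (typically package-private instead of private) solely so that tests can reach it; the annotation itself changes no runtime behavior. A minimal sketch of the pattern follows; the Counter class and its members here are hypothetical and not taken from Flink:

    import org.apache.flink.annotation.VisibleForTesting;

    public class Counter {

        private long count;

        public void increment() {
            count++;
        }

        // Package-private rather than private solely so that tests in the
        // same package can inspect the internal state; not part of the
        // public API of this class.
        @VisibleForTesting
        long currentCount() {
            return count;
        }
    }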
Example #1
Source Project: flink Author: flink-tpc-ds File: MiniCluster.java License: Apache License 2.0

@VisibleForTesting
void startTaskExecutor() throws Exception {
    synchronized (lock) {
        final Configuration configuration = miniClusterConfiguration.getConfiguration();
        final TaskExecutor taskExecutor = TaskManagerRunner.startTaskManager(
            configuration,
            new ResourceID(UUID.randomUUID().toString()),
            taskManagerRpcServiceFactory.createRpcService(),
            haServices,
            heartbeatServices,
            metricRegistry,
            blobCacheService,
            useLocalCommunication(),
            taskManagerTerminatingFatalErrorHandlerFactory.create(taskManagers.size()));

        taskExecutor.start();
        taskManagers.add(taskExecutor);
    }
}
Example #2
Source Project: flink Author: flink-tpc-ds File: Dispatcher.java License: Apache License 2.0

/**
 * Recovers all jobs persisted via the submitted job graph store.
 */
@VisibleForTesting
Collection<JobGraph> recoverJobs() throws Exception {
    log.info("Recovering all persisted jobs.");
    final Collection<JobID> jobIds = submittedJobGraphStore.getJobIds();

    try {
        return recoverJobGraphs(jobIds);
    } catch (Exception e) {
        // release all recovered job graphs
        for (JobID jobId : jobIds) {
            try {
                submittedJobGraphStore.releaseJobGraph(jobId);
            } catch (Exception ie) {
                e.addSuppressed(ie);
            }
        }
        throw e;
    }
}
Example #3
Source Project: flink Author: flink-tpc-ds File: MiniCluster.java License: Apache License 2.0

@VisibleForTesting
protected Collection<? extends DispatcherResourceManagerComponent<?>> createDispatcherResourceManagerComponents(
        Configuration configuration,
        RpcServiceFactory rpcServiceFactory,
        HighAvailabilityServices haServices,
        BlobServer blobServer,
        HeartbeatServices heartbeatServices,
        MetricRegistry metricRegistry,
        MetricQueryServiceRetriever metricQueryServiceRetriever,
        FatalErrorHandler fatalErrorHandler) throws Exception {
    SessionDispatcherResourceManagerComponentFactory dispatcherResourceManagerComponentFactory =
        createDispatcherResourceManagerComponentFactory();
    return Collections.singleton(
        dispatcherResourceManagerComponentFactory.create(
            configuration,
            rpcServiceFactory.createRpcService(),
            haServices,
            blobServer,
            heartbeatServices,
            metricRegistry,
            new MemoryArchivedExecutionGraphStore(),
            metricQueryServiceRetriever,
            fatalErrorHandler));
}
Example #4
Source Project: flink Author: flink-tpc-ds File: ExecutionJobVertex.java License: Apache License 2.0

/**
 * Convenience constructor for testing.
 */
@VisibleForTesting
ExecutionJobVertex(
        ExecutionGraph graph,
        JobVertex jobVertex,
        int defaultParallelism,
        Time timeout) throws JobException {
    this(
        graph,
        jobVertex,
        defaultParallelism,
        JobManagerOptions.MAX_ATTEMPTS_HISTORY_SIZE.defaultValue(),
        timeout,
        1L,
        System.currentTimeMillis());
}
Example #5
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraph.java License: Apache License 2.0

@VisibleForTesting
ExecutionGraph(
        JobInformation jobInformation,
        ScheduledExecutorService futureExecutor,
        Executor ioExecutor,
        Time timeout,
        RestartStrategy restartStrategy,
        FailoverStrategy.Factory failoverStrategy,
        SlotProvider slotProvider) throws IOException {
    this(
        jobInformation,
        futureExecutor,
        ioExecutor,
        timeout,
        restartStrategy,
        failoverStrategy,
        slotProvider,
        ExecutionGraph.class.getClassLoader(),
        VoidBlobWriter.getInstance(),
        timeout);
}
Example #6
Source Project: flink Author: flink-tpc-ds File: SingleInputGate.java License: Apache License 2.0

@VisibleForTesting
void requestPartitions() throws IOException, InterruptedException {
    synchronized (requestLock) {
        if (!requestedPartitionsFlag) {
            if (closeFuture.isDone()) {
                throw new IllegalStateException("Already released.");
            }

            // Sanity checks
            if (numberOfInputChannels != inputChannels.size()) {
                throw new IllegalStateException(String.format(
                    "Bug in input gate setup logic: mismatch between " +
                        "number of total input channels [%s] and the currently set number of input " +
                        "channels [%s].",
                    inputChannels.size(),
                    numberOfInputChannels));
            }

            for (InputChannel inputChannel : inputChannels.values()) {
                inputChannel.requestSubpartition(consumedSubpartitionIndex);
            }
        }

        requestedPartitionsFlag = true;
    }
}
Example #7
Source Project: Flink-CEPplus Author: ljygz File: RocksDBKeyedStateBackend.java License: Apache License 2.0

@VisibleForTesting
@SuppressWarnings("unchecked")
@Override
public int numKeyValueStateEntries() {
    int count = 0;

    for (RocksDbKvStateInfo metaInfo : kvStateInformation.values()) {
        //TODO maybe filterOrTransform only for k/v states
        try (RocksIteratorWrapper rocksIterator = RocksDBOperationUtils.getRocksIterator(db, metaInfo.columnFamilyHandle)) {
            rocksIterator.seekToFirst();

            while (rocksIterator.isValid()) {
                count++;
                rocksIterator.next();
            }
        }
    }

    return count;
}
Example #8
Source Project: Flink-CEPplus Author: ljygz File: RefCountedBufferingFileStream.java License: Apache License 2.0

@VisibleForTesting
public RefCountedBufferingFileStream(
        final RefCountedFile file,
        final int bufferSize) {
    checkArgument(bufferSize > 0L);

    this.currentTmpFile = checkNotNull(file);
    this.buffer = new byte[bufferSize];
    this.positionInBuffer = 0;
    this.closed = false;
}
Example #9
Source Project: Flink-CEPplus Author: ljygz File: S3RecoverableWriter.java License: Apache License 2.0

@VisibleForTesting
S3RecoverableWriter(
        final S3AccessHelper s3AccessHelper,
        final S3RecoverableMultipartUploadFactory uploadFactory,
        final FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
        final long userDefinedMinPartSize) {
    this.s3AccessHelper = checkNotNull(s3AccessHelper);
    this.uploadFactory = checkNotNull(uploadFactory);
    this.tempFileCreator = checkNotNull(tempFileCreator);
    this.userDefinedMinPartSize = userDefinedMinPartSize;
}
Example #10
Source Project: flink Author: flink-tpc-ds File: ElasticsearchSinkBase.java License: Apache License 2.0

/**
 * Build the {@link BulkProcessor}.
 *
 * <p>Note: this is exposed for testing purposes.
 */
@VisibleForTesting
protected BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
    checkNotNull(listener);

    BulkProcessor.Builder bulkProcessorBuilder = callBridge.createBulkProcessorBuilder(client, listener);

    // This makes flush() blocking
    bulkProcessorBuilder.setConcurrentRequests(0);

    if (bulkProcessorFlushMaxActions != null) {
        bulkProcessorBuilder.setBulkActions(bulkProcessorFlushMaxActions);
    }

    if (bulkProcessorFlushMaxSizeMb != null) {
        bulkProcessorBuilder.setBulkSize(new ByteSizeValue(bulkProcessorFlushMaxSizeMb, ByteSizeUnit.MB));
    }

    if (bulkProcessorFlushIntervalMillis != null) {
        bulkProcessorBuilder.setFlushInterval(TimeValue.timeValueMillis(bulkProcessorFlushIntervalMillis));
    }

    // if backoff retrying is disabled, bulkProcessorFlushBackoffPolicy will be null
    callBridge.configureBulkProcessorBackoff(bulkProcessorBuilder, bulkProcessorFlushBackoffPolicy);

    return bulkProcessorBuilder.build();
}
Example #11
Source Project: flink Author: flink-tpc-ds File: ListAggWsWithRetractAggFunction.java License: Apache License 2.0

@VisibleForTesting
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    ListAggWsWithRetractAccumulator that = (ListAggWsWithRetractAccumulator) o;
    return Objects.equals(list, that.list) &&
        Objects.equals(retractList, that.retractList) &&
        Objects.equals(delimiter, that.delimiter);
}
Example #12
Source Project: flink Author: flink-tpc-ds File: DefaultExecutionSlotAllocator.java License: Apache License 2.0

/**
 * Computes and returns a set with the prior allocation ids from all execution vertices scheduled together.
 *
 * @param executionVertexSchedulingRequirements contains the execution vertices which are scheduled together
 */
@VisibleForTesting
static Set<AllocationID> computeAllPriorAllocationIds(
        Collection<ExecutionVertexSchedulingRequirements> executionVertexSchedulingRequirements) {
    return executionVertexSchedulingRequirements
        .stream()
        .map(ExecutionVertexSchedulingRequirements::getPreviousAllocationId)
        .filter(Objects::nonNull)
        .collect(Collectors.toSet());
}
Example #13
Source Project: Flink-CEPplus Author: ljygz File: MiniCluster.java License: Apache License 2.0

@VisibleForTesting
@Nonnull
protected Collection<DispatcherResourceManagerComponent<?>> getDispatcherResourceManagerComponents() {
    synchronized (lock) {
        return Collections.unmodifiableCollection(dispatcherResourceManagerComponents);
    }
}
Example #14
Source Project: Flink-CEPplus Author: ljygz File: KinesisDataFetcher.java License: Apache License 2.0

@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        Object checkpointLock,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner,
        AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
        WatermarkTracker watermarkTracker,
        AtomicReference<Throwable> error,
        List<KinesisStreamShardState> subscribedShardsState,
        HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
        FlinkKinesisProxyFactory kinesisProxyFactory) {
    this.streams = checkNotNull(streams);
    this.configProps = checkNotNull(configProps);
    this.sourceContext = checkNotNull(sourceContext);
    this.checkpointLock = checkNotNull(checkpointLock);
    this.runtimeContext = checkNotNull(runtimeContext);
    this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
    this.deserializationSchema = checkNotNull(deserializationSchema);
    this.shardAssigner = checkNotNull(shardAssigner);
    this.periodicWatermarkAssigner = periodicWatermarkAssigner;
    this.watermarkTracker = watermarkTracker;
    this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
    this.kinesis = kinesisProxyFactory.create(configProps);

    this.consumerMetricGroup = runtimeContext.getMetricGroup()
        .addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

    this.error = checkNotNull(error);
    this.subscribedShardsState = checkNotNull(subscribedShardsState);
    this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

    this.shardConsumersExecutor =
        createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
    this.recordEmitter = createRecordEmitter(configProps);
}
Example #15
Source Project: flink Author: flink-tpc-ds File: StateSnapshotContextSynchronousImpl.java License: Apache License 2.0

@VisibleForTesting
public StateSnapshotContextSynchronousImpl(long checkpointId, long checkpointTimestamp) {
    this.checkpointId = checkpointId;
    this.checkpointTimestamp = checkpointTimestamp;
    this.streamFactory = null;
    this.keyGroupRange = KeyGroupRange.EMPTY_KEY_GROUP_RANGE;
    this.closableRegistry = new CloseableRegistry();
}
Example #16
Source Project: Flink-CEPplus Author: ljygz File: NestedMapsStateTable.java License: Apache License 2.0

@VisibleForTesting
Map<N, Map<K, S>> getMapForKeyGroup(int keyGroupIndex) {
    final int pos = indexToOffset(keyGroupIndex);
    if (pos >= 0 && pos < state.length) {
        return state[pos];
    } else {
        return null;
    }
}
Example #17
Source Project: flink Author: flink-tpc-ds File: InternalTimeServiceManager.java License: Apache License 2.0

@VisibleForTesting
public int numEventTimeTimers() {
    int count = 0;
    for (InternalTimerServiceImpl<?, ?> timerService : timerServices.values()) {
        count += timerService.numEventTimeTimers();
    }
    return count;
}
Example #18
Source Project: flink Author: flink-tpc-ds File: BucketStateSerializer.java License: Apache License 2.0

@VisibleForTesting
BucketState<BucketID> deserializeV1(DataInputView in) throws IOException {
    final BucketID bucketId = SimpleVersionedSerialization.readVersionAndDeSerialize(bucketIdSerializer, in);
    final String bucketPathStr = in.readUTF();
    final long creationTime = in.readLong();

    // then get the current resumable stream
    RecoverableWriter.ResumeRecoverable current = null;
    if (in.readBoolean()) {
        current = SimpleVersionedSerialization.readVersionAndDeSerialize(resumableSerializer, in);
    }

    final int committableVersion = in.readInt();
    final int numCheckpoints = in.readInt();
    final HashMap<Long, List<RecoverableWriter.CommitRecoverable>> resumablesPerCheckpoint = new HashMap<>(numCheckpoints);

    for (int i = 0; i < numCheckpoints; i++) {
        final long checkpointId = in.readLong();
        final int noOfResumables = in.readInt();

        final List<RecoverableWriter.CommitRecoverable> resumables = new ArrayList<>(noOfResumables);
        for (int j = 0; j < noOfResumables; j++) {
            final byte[] bytes = new byte[in.readInt()];
            in.readFully(bytes);
            resumables.add(commitableSerializer.deserialize(committableVersion, bytes));
        }
        resumablesPerCheckpoint.put(checkpointId, resumables);
    }

    return new BucketState<>(
        bucketId,
        new Path(bucketPathStr),
        creationTime,
        current,
        resumablesPerCheckpoint);
}
Example #19
Source Project: flink Author: flink-tpc-ds File: TaskDeploymentDescriptorFactory.java License: Apache License 2.0

@VisibleForTesting
static ShuffleDescriptor getConsumedPartitionShuffleDescriptor(
        ResultPartitionID consumedPartitionId,
        ResultPartitionType resultPartitionType,
        boolean isConsumable,
        ExecutionState producerState,
        boolean allowUnknownPartitions,
        @Nullable ResultPartitionDeploymentDescriptor consumedPartitionDescriptor) {
    // The producing task needs to be RUNNING or already FINISHED
    if ((resultPartitionType.isPipelined() || isConsumable) &&
            consumedPartitionDescriptor != null &&
            isProducerAvailable(producerState)) {
        // partition is already registered
        return consumedPartitionDescriptor.getShuffleDescriptor();
    } else if (allowUnknownPartitions) {
        // The producing task might not have registered the partition yet
        return new UnknownShuffleDescriptor(consumedPartitionId);
    } else {
        // throw respective exceptions
        handleConsumedPartitionShuffleDescriptorErrors(
            consumedPartitionId,
            resultPartitionType,
            isConsumable,
            producerState);
        return null; // should never happen
    }
}
Example #20
Source Project: Flink-CEPplus Author: ljygz File: AsyncSnapshotCallable.java License: Apache License 2.0

@VisibleForTesting
protected void cancel() {
    closeSnapshotIO();
    if (resourceCleanupOwnershipTaken.compareAndSet(false, true)) {
        cleanup();
    }
}
Example #21
Source Project: Flink-CEPplus Author: ljygz File: SavepointV1Serializer.java License: Apache License 2.0

@VisibleForTesting
public static void serializeOperatorStateHandle(
        OperatorStateHandle stateHandle, DataOutputStream dos) throws IOException {

    if (stateHandle != null) {
        dos.writeByte(PARTITIONABLE_OPERATOR_STATE_HANDLE);
        Map<String, OperatorStateHandle.StateMetaInfo> partitionOffsetsMap =
            stateHandle.getStateNameToPartitionOffsets();
        dos.writeInt(partitionOffsetsMap.size());
        for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> entry : partitionOffsetsMap.entrySet()) {
            dos.writeUTF(entry.getKey());

            OperatorStateHandle.StateMetaInfo stateMetaInfo = entry.getValue();

            int mode = stateMetaInfo.getDistributionMode().ordinal();
            dos.writeByte(mode);

            long[] offsets = stateMetaInfo.getOffsets();
            dos.writeInt(offsets.length);
            for (long offset : offsets) {
                dos.writeLong(offset);
            }
        }
        serializeStreamStateHandle(stateHandle.getDelegateStateHandle(), dos);
    } else {
        dos.writeByte(NULL_HANDLE);
    }
}
Example #22
Source Project: flink Author: flink-tpc-ds File: ExecutionGraph.java License: Apache License 2.0

@VisibleForTesting
public JobStatus waitUntilTerminal() throws InterruptedException {
    try {
        return terminationFuture.get();
    } catch (ExecutionException e) {
        // this should never happen
        // it would be a bug, so we don't expect this to be handled and throw
        // an unchecked exception here
        throw new RuntimeException(e);
    }
}
Example #23
Source Project: Flink-CEPplus Author: ljygz File: SystemProcessingTimeService.java License: Apache License 2.0

@VisibleForTesting
int getNumTasksScheduled() {
    BlockingQueue<?> queue = timerService.getQueue();
    if (queue == null) {
        return 0;
    } else {
        return queue.size();
    }
}
Example #24
Source Project: flink Author: flink-tpc-ds File: RefCountedBufferingFileStream.java License: Apache License 2.0

@VisibleForTesting
public RefCountedBufferingFileStream(
        final RefCountedFile file,
        final int bufferSize) {
    checkArgument(bufferSize > 0L);

    this.currentTmpFile = checkNotNull(file);
    this.buffer = new byte[bufferSize];
    this.positionInBuffer = 0;
    this.closed = false;
}
Example #25
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraph.java License: Apache License 2.0

/**
 * This constructor is for tests only, because it sets default values for many fields.
 */
@VisibleForTesting
ExecutionGraph(
        ScheduledExecutorService futureExecutor,
        Executor ioExecutor,
        JobID jobId,
        String jobName,
        Configuration jobConfig,
        SerializedValue<ExecutionConfig> serializedConfig,
        Time timeout,
        RestartStrategy restartStrategy,
        SlotProvider slotProvider) throws IOException {
    this(
        new JobInformation(
            jobId,
            jobName,
            serializedConfig,
            jobConfig,
            Collections.emptyList(),
            Collections.emptyList()),
        futureExecutor,
        ioExecutor,
        timeout,
        restartStrategy,
        slotProvider);
}
Example #26
Source Project: Flink-CEPplus Author: ljygz File: Scheduler.java License: Apache License 2.0

@VisibleForTesting
@Nullable
public Instance getInstance(ResourceID resourceId) {
    for (Instance instance : allInstances) {
        if (Objects.equals(resourceId, instance.getTaskManagerID())) {
            return instance;
        }
    }
    return null;
}
Example #27
Source Project: flink Author: flink-tpc-ds File: JobLeaderService.java License: Apache License 2.0

/**
 * Check whether the service monitors the given job.
 *
 * @param jobId identifying the job
 * @return True if the given job is monitored; otherwise false
 */
@VisibleForTesting
public boolean containsJob(JobID jobId) {
    Preconditions.checkState(JobLeaderService.State.STARTED == state,
        "The service is currently not running.");

    return jobLeaderServices.containsKey(jobId);
}
Example #28
Source Project: flink Author: flink-tpc-ds File: PartitionDescriptor.java License: Apache License 2.0

@VisibleForTesting
public PartitionDescriptor(
        IntermediateDataSetID resultId,
        IntermediateResultPartitionID partitionId,
        ResultPartitionType partitionType,
        int numberOfSubpartitions,
        int connectionIndex) {
    this.resultId = checkNotNull(resultId);
    this.partitionId = checkNotNull(partitionId);
    this.partitionType = checkNotNull(partitionType);
    checkArgument(numberOfSubpartitions >= 1);
    this.numberOfSubpartitions = numberOfSubpartitions;
    this.connectionIndex = connectionIndex;
}
Example #29
Source Project: flink Author: flink-tpc-ds File: SavepointV2Serializer.java License: Apache License 2.0

@VisibleForTesting
public static OperatorStateHandle deserializeOperatorStateHandle(
        DataInputStream dis) throws IOException {

    final int type = dis.readByte();
    if (NULL_HANDLE == type) {
        return null;
    } else if (PARTITIONABLE_OPERATOR_STATE_HANDLE == type) {
        int mapSize = dis.readInt();
        Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>(mapSize);
        for (int i = 0; i < mapSize; ++i) {
            String key = dis.readUTF();

            int modeOrdinal = dis.readByte();
            OperatorStateHandle.Mode mode = OperatorStateHandle.Mode.values()[modeOrdinal];

            long[] offsets = new long[dis.readInt()];
            for (int j = 0; j < offsets.length; ++j) {
                offsets[j] = dis.readLong();
            }

            OperatorStateHandle.StateMetaInfo metaInfo =
                new OperatorStateHandle.StateMetaInfo(offsets, mode);
            offsetsMap.put(key, metaInfo);
        }
        StreamStateHandle stateHandle = deserializeStreamStateHandle(dis);
        return new OperatorStreamStateHandle(offsetsMap, stateHandle);
    } else {
        throw new IllegalStateException("Reading invalid OperatorStateHandle, type: " + type);
    }
}
Example #30
Source Project: flink Author: flink-tpc-ds File: StandaloneJobClusterEntryPoint.java License: Apache License 2.0

@VisibleForTesting
static void setDefaultExecutionModeIfNotConfigured(Configuration configuration) {
    if (isNoExecutionModeConfigured(configuration)) {
        // In contrast to other places, the default for standalone job clusters is ExecutionMode.DETACHED
        configuration.setString(ClusterEntrypoint.EXECUTION_MODE, ExecutionMode.DETACHED.toString());
    }
}
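All of the examples above share the same test-side payoff: a test in the same package can call the widened member directly. To close the loop, here is a hypothetical JUnit 5 test against the Counter sketch from the introduction, assuming both classes live in the same package:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    // Hypothetical test for the Counter sketch above; placed in the same
    // package so the package-private @VisibleForTesting accessor is reachable.
    class CounterTest {

        @Test
        void incrementUpdatesInternalCount() {
            Counter counter = new Counter();
            counter.increment();
            counter.increment();

            // currentCount() is reachable here only because @VisibleForTesting
            // documents its deliberately widened visibility.
            assertEquals(2L, counter.currentCount());
        }
    }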