org.apache.flink.runtime.state.StateBackend Java Examples

The following examples show how to use org.apache.flink.runtime.state.StateBackend. Each example is taken from an open source project; the source file and project it comes from are noted above the code.
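
Most of the examples below share the same basic setup: obtain a StreamExecutionEnvironment, enable checkpointing, and install a StateBackend before building the pipeline. The snippet below is a minimal sketch of that pattern, not taken from any of the projects above; the class name, checkpoint interval, and checkpoint path are illustrative placeholders, and FsStateBackend is just one of the backends that appear in the examples.

import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StateBackendUsageSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// checkpoint every 10 seconds with exactly-once guarantees
		env.enableCheckpointing(10_000, CheckpointingMode.EXACTLY_ONCE);

		// file-based state backend; the checkpoint directory is a placeholder
		StateBackend backend = new FsStateBackend("file:///tmp/flink-checkpoints");
		env.setStateBackend(backend);

		// ... define sources, transformations and sinks here, then call env.execute(...)
	}
}
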
Example #1
Source File: ResumeCheckpointManuallyITCase.java    From flink with Apache License 2.0
private static JobGraph getJobGraph(StateBackend backend, @Nullable String externalCheckpoint) {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.enableCheckpointing(500);
	env.setStateBackend(backend);
	env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);
	env.setParallelism(PARALLELISM);
	env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

	env.addSource(new NotifyingInfiniteTupleSource(10_000))
		.keyBy(0)
		.timeWindow(Time.seconds(3))
		.reduce((value1, value2) -> Tuple2.of(value1.f0, value1.f1 + value2.f1))
		.filter(value -> value.f0.startsWith("Tuple 0"));

	StreamGraph streamGraph = env.getStreamGraph("Test");

	JobGraph jobGraph = streamGraph.getJobGraph();

	// recover from previous iteration?
	if (externalCheckpoint != null) {
		jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(externalCheckpoint));
	}

	return jobGraph;
}
 
Example #2
Source File: KeyedStateInputFormat.java    From flink with Apache License 2.0
/**
 * Creates an input format for reading partitioned state from an operator in a savepoint.
 *
 * @param operatorState The state to be queried.
 * @param stateBackend  The state backend used to snapshot the operator.
 * @param keyType       The type information describing the key type.
 * @param userFunction  The {@link KeyedStateReaderFunction} called for each key in the operator.
 */
public KeyedStateInputFormat(
	OperatorState operatorState,
	StateBackend stateBackend,
	TypeInformation<K> keyType,
	KeyedStateReaderFunction<K, OUT> userFunction) {
	Preconditions.checkNotNull(operatorState, "The operator state cannot be null");
	Preconditions.checkNotNull(stateBackend, "The state backend cannot be null");
	Preconditions.checkNotNull(keyType, "The key type information cannot be null");
	Preconditions.checkNotNull(userFunction, "The user function cannot be null");

	this.operatorState = operatorState;
	this.stateBackend = stateBackend;
	this.keyType = keyType;
	this.userFunction = userFunction;
}
 
Example #3
Source File: JobCheckpointingSettings.java    From flink with Apache License 2.0
public JobCheckpointingSettings(
		List<JobVertexID> verticesToTrigger,
		List<JobVertexID> verticesToAcknowledge,
		List<JobVertexID> verticesToConfirm,
		CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
		@Nullable SerializedValue<StateBackend> defaultStateBackend,
		@Nullable SerializedValue<MasterTriggerRestoreHook.Factory[]> masterHooks) {


	this.verticesToTrigger = requireNonNull(verticesToTrigger);
	this.verticesToAcknowledge = requireNonNull(verticesToAcknowledge);
	this.verticesToConfirm = requireNonNull(verticesToConfirm);
	this.checkpointCoordinatorConfiguration = Preconditions.checkNotNull(checkpointCoordinatorConfiguration);
	this.defaultStateBackend = defaultStateBackend;
	this.masterHooks = masterHooks;
}
 
Example #4
Source File: TaskCheckpointingBehaviourTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testBlockingNonInterruptibleCheckpoint() throws Exception {

	StateBackend lockingStateBackend = new BackendForTestStream(LockingOutputStream::new);

	Task task =
		createTask(new TestOperator(), lockingStateBackend, mock(CheckpointResponder.class), true);

	// start the task and wait until it is in "restore"
	task.startTaskThread();
	IN_CHECKPOINT_LATCH.await();

	// cancel the task and wait. unless cancellation properly closes
	// the streams, this will never terminate
	task.cancelExecution();
	task.getExecutingThread().join();

	assertEquals(ExecutionState.CANCELED, task.getExecutionState());
	assertNull(task.getFailureCause());
}
 
Example #5
Source File: AbstractOperatorRestoreTestBase.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createJobGraph(ExecutionMode mode) {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStateBackend((StateBackend) new MemoryStateBackend());

	switch (mode) {
		case MIGRATE:
			createMigrationJob(env);
			break;
		case RESTORE:
			createRestoredJob(env);
			break;
	}

	return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
 
Example #6
Source File: DispatcherTest.java    From flink with Apache License 2.0
@Nonnull
private URI createTestingSavepoint() throws IOException, URISyntaxException {
	final StateBackend stateBackend = Checkpoints.loadStateBackend(configuration, Thread.currentThread().getContextClassLoader(), log);
	final CheckpointStorageCoordinatorView checkpointStorage = stateBackend.createCheckpointStorage(jobGraph.getJobID());
	final File savepointFile = temporaryFolder.newFolder();
	final long checkpointId = 1L;

	final CheckpointStorageLocation checkpointStorageLocation = checkpointStorage.initializeLocationForSavepoint(checkpointId, savepointFile.getAbsolutePath());

	final CheckpointMetadataOutputStream metadataOutputStream = checkpointStorageLocation.createMetadataOutputStream();
	Checkpoints.storeCheckpointMetadata(new SavepointV2(checkpointId, Collections.emptyList(), Collections.emptyList()), metadataOutputStream);

	final CompletedCheckpointStorageLocation completedCheckpointStorageLocation = metadataOutputStream.closeAndFinalizeCheckpoint();

	return new URI(completedCheckpointStorageLocation.getExternalPointer());

}
 
Example #7
Source File: DataStreamAllroundTestJobFactory.java    From flink with Apache License 2.0
private static void setupStateBackend(final StreamExecutionEnvironment env, final ParameterTool pt) throws IOException {
	final String stateBackend = pt.get(
		STATE_BACKEND.key(),
		STATE_BACKEND.defaultValue());

	final String checkpointDir = pt.getRequired(STATE_BACKEND_CHECKPOINT_DIR.key());

	if ("file".equalsIgnoreCase(stateBackend)) {
		boolean asyncCheckpoints = pt.getBoolean(
			STATE_BACKEND_FILE_ASYNC.key(),
			STATE_BACKEND_FILE_ASYNC.defaultValue());

		env.setStateBackend((StateBackend) new FsStateBackend(checkpointDir, asyncCheckpoints));
	} else if ("rocks".equalsIgnoreCase(stateBackend)) {
		boolean incrementalCheckpoints = pt.getBoolean(
			STATE_BACKEND_ROCKS_INCREMENTAL.key(),
			STATE_BACKEND_ROCKS_INCREMENTAL.defaultValue());

		env.setStateBackend((StateBackend) new RocksDBStateBackend(checkpointDir, incrementalCheckpoints));
	} else {
		throw new IllegalArgumentException("Unknown backend requested: " + stateBackend);
	}
}
 
Example #8
Source File: BootstrapTransformation.java    From flink with Apache License 2.0
@VisibleForTesting
StreamConfig getConfig(OperatorID operatorID, StateBackend stateBackend, StreamOperator<TaggedOperatorSubtaskState> operator) {
	// Eagerly perform a deep copy of the configuration, otherwise it will result in undefined behavior
	// when deploying with multiple bootstrap transformations.
	Configuration deepCopy = new Configuration(dataSet.getExecutionEnvironment().getConfiguration());
	final StreamConfig config = new StreamConfig(deepCopy);
	config.setChainStart();
	config.setCheckpointingEnabled(true);
	config.setCheckpointMode(CheckpointingMode.EXACTLY_ONCE);

	if (keyType != null) {
		TypeSerializer<?> keySerializer = keyType.createSerializer(dataSet.getExecutionEnvironment().getConfig());

		config.setStateKeySerializer(keySerializer);
		config.setStatePartitioner(0, originalKeySelector);
	}

	config.setStreamOperator(operator);
	config.setOperatorName(operatorID.toHexString());
	config.setOperatorID(operatorID);
	config.setStateBackend(stateBackend);
	return config;
}
 
Example #9
Source File: KeyedStateInputFormat.java    From flink with Apache License 2.0
/**
 * Creates an input format for reading partitioned state from an operator in a savepoint.
 *
 * @param operatorState The state to be queried.
 * @param stateBackend  The state backend used to snapshot the operator.
 * @param configuration The underlying Flink configuration used to configure the state backend.
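 * @param operator      The {@link StateReaderOperator} used to read state from the savepoint.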
 */
public KeyedStateInputFormat(
	OperatorState operatorState,
	StateBackend stateBackend,
	Configuration configuration,
	StateReaderOperator<?, K, N, OUT> operator) {
	Preconditions.checkNotNull(operatorState, "The operator state cannot be null");
	Preconditions.checkNotNull(stateBackend, "The state backend cannot be null");
	Preconditions.checkNotNull(configuration, "The configuration cannot be null");
	Preconditions.checkNotNull(operator, "The operator cannot be null");

	this.operatorState = operatorState;
	this.stateBackend = stateBackend;
	// Eagerly deep copy the configuration object
	// otherwise there will be undefined behavior
	// when executing pipelines with multiple input formats
	this.configuration = new Configuration(configuration);
	this.operator = operator;
}
 
Example #10
Source File: JobCheckpointingSettings.java    From Flink-CEPplus with Apache License 2.0
public JobCheckpointingSettings(
		List<JobVertexID> verticesToTrigger,
		List<JobVertexID> verticesToAcknowledge,
		List<JobVertexID> verticesToConfirm,
		CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
		@Nullable SerializedValue<StateBackend> defaultStateBackend,
		@Nullable SerializedValue<MasterTriggerRestoreHook.Factory[]> masterHooks) {


	this.verticesToTrigger = requireNonNull(verticesToTrigger);
	this.verticesToAcknowledge = requireNonNull(verticesToAcknowledge);
	this.verticesToConfirm = requireNonNull(verticesToConfirm);
	this.checkpointCoordinatorConfiguration = Preconditions.checkNotNull(checkpointCoordinatorConfiguration);
	this.defaultStateBackend = defaultStateBackend;
	this.masterHooks = masterHooks;
}
 
Example #11
Source File: DispatcherTest.java    From Flink-CEPplus with Apache License 2.0
@Nonnull
private URI createTestingSavepoint() throws IOException, URISyntaxException {
	final StateBackend stateBackend = Checkpoints.loadStateBackend(configuration, Thread.currentThread().getContextClassLoader(), log);
	final CheckpointStorage checkpointStorage = stateBackend.createCheckpointStorage(jobGraph.getJobID());
	final File savepointFile = temporaryFolder.newFolder();
	final long checkpointId = 1L;

	final CheckpointStorageLocation checkpointStorageLocation = checkpointStorage.initializeLocationForSavepoint(checkpointId, savepointFile.getAbsolutePath());

	final CheckpointMetadataOutputStream metadataOutputStream = checkpointStorageLocation.createMetadataOutputStream();
	Checkpoints.storeCheckpointMetadata(new SavepointV2(checkpointId, Collections.emptyList(), Collections.emptyList()), metadataOutputStream);

	final CompletedCheckpointStorageLocation completedCheckpointStorageLocation = metadataOutputStream.closeAndFinalizeCheckpoint();

	return new URI(completedCheckpointStorageLocation.getExternalPointer());

}
 
Example #12
Source File: AbstractOperatorRestoreTestBase.java    From flink with Apache License 2.0
private JobGraph createJobGraph(ExecutionMode mode) {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStateBackend((StateBackend) new MemoryStateBackend());

	switch (mode) {
		case MIGRATE:
			createMigrationJob(env);
			break;
		case RESTORE:
			createRestoredJob(env);
			break;
	}

	return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
 
Example #13
Source File: MockStreamTaskBuilder.java    From flink with Apache License 2.0
public MockStreamTaskBuilder(Environment environment) throws Exception {
	this.environment = environment;
	this.config = new StreamConfig(environment.getTaskConfiguration());

	StateBackend stateBackend = new MemoryStateBackend();
	this.checkpointStorage = stateBackend.createCheckpointStorage(new JobID());
	this.streamTaskStateInitializer = new StreamTaskStateInitializerImpl(environment, stateBackend);
}
 
Example #14
Source File: StreamTask.java    From flink with Apache License 2.0
private StateBackend createStateBackend() throws Exception {
	final StateBackend fromApplication = configuration.getStateBackend(getUserCodeClassLoader());

	return StateBackendLoader.fromApplicationOrConfigOrDefault(
			fromApplication,
			getEnvironment().getTaskManagerInfo().getConfiguration(),
			getUserCodeClassLoader(),
			LOG);
}
 
Example #15
Source File: StreamExecutionEnvironmentComplexConfigurationTest.java    From flink with Apache License 2.0
@Test
public void testNotOverridingStateBackendWithDefaultsFromConfiguration() {
	StreamExecutionEnvironment envFromConfiguration = StreamExecutionEnvironment.getExecutionEnvironment();
	envFromConfiguration.setStateBackend(new MemoryStateBackend());

	// mutate config according to configuration
	envFromConfiguration.configure(new Configuration(), Thread.currentThread().getContextClassLoader());

	StateBackend actualStateBackend = envFromConfiguration.getStateBackend();
	assertThat(actualStateBackend, instanceOf(MemoryStateBackend.class));
}
 
Example #16
Source File: DataStreamStateTTLTestProgram.java    From flink with Apache License 2.0
/**
 * Sets the state backend to a new {@link StubStateBackend} which has a {@link MonotonicTTLTimeProvider}.
 *
 * @param env The {@link StreamExecutionEnvironment} of the job.
 */
private static void setBackendWithCustomTTLTimeProvider(StreamExecutionEnvironment env) {
	final MonotonicTTLTimeProvider ttlTimeProvider = new MonotonicTTLTimeProvider();

	final StateBackend configuredBackend = env.getStateBackend();
	final StateBackend stubBackend = new StubStateBackend(configuredBackend, ttlTimeProvider);
	env.setStateBackend(stubBackend);
}
 
Example #17
Source File: StreamTaskStateInitializerImpl.java    From Flink-CEPplus with Apache License 2.0
public StreamTaskStateInitializerImpl(
	Environment environment,
	StateBackend stateBackend,
	ProcessingTimeService processingTimeService) {

	this.environment = environment;
	this.taskStateManager = Preconditions.checkNotNull(environment.getTaskStateManager());
	this.stateBackend = Preconditions.checkNotNull(stateBackend);
	this.processingTimeService = processingTimeService;
}
 
Example #18
Source File: DataStreamStateTTLTestProgram.java    From flink with Apache License 2.0
/**
 * Sets the state backend to a new {@link StubStateBackend} which has a {@link MonotonicTTLTimeProvider}.
 *
 * @param env The {@link StreamExecutionEnvironment} of the job.
 */
private static void setBackendWithCustomTTLTimeProvider(StreamExecutionEnvironment env) {
	final MonotonicTTLTimeProvider ttlTimeProvider = new MonotonicTTLTimeProvider();

	final StateBackend configuredBackend = env.getStateBackend();
	if (configuredBackend instanceof RocksDBStateBackend) {
		((RocksDBStateBackend) configuredBackend).enableTtlCompactionFilter();
	}
	final StateBackend stubBackend = new StubStateBackend(configuredBackend, ttlTimeProvider);
	env.setStateBackend(stubBackend);
}
 
Example #19
Source File: StreamConfig.java    From flink with Apache License 2.0
public void setStateBackend(StateBackend backend) {
	if (backend != null) {
		try {
			InstantiationUtil.writeObjectToConfig(backend, this.config, STATE_BACKEND);
		} catch (Exception e) {
			throw new StreamTaskException("Could not serialize stateHandle provider.", e);
		}
	}
}
 
Example #20
Source File: AbstractStreamOperatorTestHarness.java    From flink with Apache License 2.0
protected StreamTaskStateInitializer createStreamTaskStateManager(
	Environment env,
	StateBackend stateBackend,
	TtlTimeProvider ttlTimeProvider) {
	return new StreamTaskStateInitializerImpl(
		env,
		stateBackend,
		ttlTimeProvider);
}
 
Example #21
Source File: ResumeCheckpointManuallyITCase.java    From flink with Apache License 2.0
private static String runJobAndGetExternalizedCheckpoint(StateBackend backend, File checkpointDir, @Nullable String externalCheckpoint, ClusterClient<?> client) throws Exception {
	JobGraph initialJobGraph = getJobGraph(backend, externalCheckpoint);
	NotifyingInfiniteTupleSource.countDownLatch = new CountDownLatch(PARALLELISM);

	client.submitJob(initialJobGraph, ResumeCheckpointManuallyITCase.class.getClassLoader());

	// wait until all sources have been started
	NotifyingInfiniteTupleSource.countDownLatch.await();

	waitUntilExternalizedCheckpointCreated(checkpointDir, initialJobGraph.getJobID());
	client.cancel(initialJobGraph.getJobID());
	waitUntilCanceled(initialJobGraph.getJobID(), client);

	return getExternalizedCheckpointCheckpointPath(checkpointDir, initialJobGraph.getJobID());
}
 
Example #22
Source File: EnableCheckpointMain.java    From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    // Create the streaming execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setGlobalJobParameters(ParameterTool.fromArgs(args));
    env.setParallelism(1);

    env.addSource(new SourceFunction<Long>() {
        @Override
        public void run(SourceContext<Long> sourceContext) throws Exception {
            while (true) {
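                // collecting null triggers an NPE in the downstream map, presumably to exercise the default restart strategy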
                sourceContext.collect(null);
            }
        }
        @Override
        public void cancel() {
        }
    })
            .map((MapFunction<Long, Long>) aLong -> aLong / 1)
            .print();

    // Enable checkpointing
    StateBackend stateBackend = new MemoryStateBackend(5 * 1024 * 1024 * 100);
    env.enableCheckpointing(10000);
    env.setStateBackend(stateBackend);

    env.execute("zhisheng default RestartStrategy enable checkpoint example");
}
 
Example #23
Source File: MockTtlStateTest.java    From flink with Apache License 2.0
@Override
protected StateBackendTestContext createStateBackendTestContext(TtlTimeProvider timeProvider) {
	return new StateBackendTestContext(timeProvider) {
		@Override
		protected StateBackend createStateBackend() {
			return new MockStateBackend();
		}
	};
}
 
Example #24
Source File: RocksDBTtlStateTestBase.java    From flink with Apache License 2.0
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
	String dbPath;
	String checkpointPath;
	try {
		dbPath = tempFolder.newFolder().getAbsolutePath();
		checkpointPath = tempFolder.newFolder().toURI().toString();
	} catch (IOException e) {
		throw new FlinkRuntimeException("Failed to init rocksdb test state backend");
	}
	RocksDBStateBackend backend = new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
	Configuration config = new Configuration();
	backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
	backend.setDbStoragePath(dbPath);
	return backend;
}
 
Example #25
Source File: StreamTask.java    From Flink-CEPplus with Apache License 2.0
private StateBackend createStateBackend() throws Exception {
	final StateBackend fromApplication = configuration.getStateBackend(getUserCodeClassLoader());

	return StateBackendLoader.fromApplicationOrConfigOrDefault(
			fromApplication,
			getEnvironment().getTaskManagerInfo().getConfiguration(),
			getUserCodeClassLoader(),
			LOG);
}
 
Example #26
Source File: ExistingSavepoint.java    From flink with Apache License 2.0
ExistingSavepoint(ExecutionEnvironment env, SavepointMetadata metadata, StateBackend stateBackend) throws IOException {
	super(metadata, stateBackend);
	Preconditions.checkNotNull(env, "The execution environment must not be null");
	Preconditions.checkNotNull(metadata, "The savepoint metadata must not be null");
	Preconditions.checkNotNull(stateBackend, "The state backend must not be null");

	this.env = env;
	this.metadata = metadata;
	this.stateBackend = stateBackend;
}
 
Example #27
Source File: StreamConfig.java    From flink with Apache License 2.0
public StateBackend getStateBackend(ClassLoader cl) {
	try {
		return InstantiationUtil.readObjectFromConfig(this.config, STATE_BACKEND, cl);
	} catch (Exception e) {
		throw new StreamTaskException("Could not instantiate statehandle provider.", e);
	}
}
 
Example #28
Source File: JobCheckpointingSettings.java    From flink with Apache License 2.0
public JobCheckpointingSettings(
		List<JobVertexID> verticesToTrigger,
		List<JobVertexID> verticesToAcknowledge,
		List<JobVertexID> verticesToConfirm,
		CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
		@Nullable SerializedValue<StateBackend> defaultStateBackend) {

	this(
		verticesToTrigger,
		verticesToAcknowledge,
		verticesToConfirm,
		checkpointCoordinatorConfiguration,
		defaultStateBackend,
		null);
}