namespace modelchecker {
template<typename SparseMdpModelType>
SparseMdpPrctlModelChecker<SparseMdpModelType>::SparseMdpPrctlModelChecker(SparseMdpModelType const& model)
template<typename SparseMdpModelType>
bool SparseMdpPrctlModelChecker<SparseMdpModelType>::canHandleStatic(CheckTask<storm::logic::Formula, SolutionType> const& checkTask,
                                                                     bool* requiresSingleInitialState) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
    if (requiresSingleInitialState) {
        *requiresSingleInitialState = true;
    }
template<typename SparseMdpModelType>
bool SparseMdpPrctlModelChecker<SparseMdpModelType>::canHandle(CheckTask<storm::logic::Formula, SolutionType> const& checkTask) const {
    bool requiresSingleInitialState = false;
    if (canHandleStatic(checkTask, &requiresSingleInitialState)) {
        return !requiresSingleInitialState || this->getModel().getInitialStates().getNumberOfSetBits() == 1;
    }
    return false;
}
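// --------------------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this translation unit): how a caller might
// drive this checker. The helper name `checkOnMdp` is hypothetical; a sparse MDP over
// doubles and an already parsed formula are assumed to be provided by the caller.
// --------------------------------------------------------------------------------------
/*
#include "storm/environment/Environment.h"
#include "storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h"
#include "storm/modelchecker/results/CheckResult.h"
#include "storm/models/sparse/Mdp.h"

std::unique_ptr<storm::modelchecker::CheckResult> checkOnMdp(storm::models::sparse::Mdp<double> const& mdp,
                                                             std::shared_ptr<storm::logic::Formula const> const& formula) {
    storm::Environment env;
    storm::modelchecker::SparseMdpPrctlModelChecker<storm::models::sparse::Mdp<double>> checker(mdp);
    // Mark only the initial states as relevant, matching the guards used throughout this file.
    storm::modelchecker::CheckTask<storm::logic::Formula, double> task(*formula, true);
    if (!checker.canHandle(task)) {
        return nullptr;  // formula outside the supported fragment, or a unique initial state is required
    }
    return checker.check(env, task);
}
*/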
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeBoundedUntilProbabilities(
    Environment const& env, CheckTask<storm::logic::BoundedUntilFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented bounded until with intervals");
    }
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
                    "Checking non-trivial bounded until probabilities can only be computed for the initial states of the model.");
    auto formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(checkTask.getFormula().asSharedPointer(), opInfo);
    STORM_LOG_THROW(pathFormula.hasUpperBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have (a single) upper step bound.");
                    "Formula lower step bound must be discrete/integral.");
                    "Formula needs to have discrete upper time bound.");
    std::unique_ptr<CheckResult> leftResultPointer = this->check(env, pathFormula.getLeftSubformula());
    std::unique_ptr<CheckResult> rightResultPointer = this->check(env, pathFormula.getRightSubformula());
    std::vector<SolutionType> numericResult =
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeNextProbabilities(
    Environment const& env, CheckTask<storm::logic::NextFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, pathFormula.getSubformula());
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeUntilProbabilities(
    Environment const& env, CheckTask<storm::logic::UntilFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> leftResultPointer = this->check(env, pathFormula.getLeftSubformula());
    std::unique_ptr<CheckResult> rightResultPointer = this->check(env, pathFormula.getRightSubformula());
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(std::move(ret.scheduler));
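// --------------------------------------------------------------------------------------
// Scheduler extraction sketch (illustrative only, not part of this translation unit):
// when scheduler production is requested on the CheckTask, the scheduler moved into the
// result above can be retrieved from the quantitative check result. Reuses the
// hypothetical `checker`, `env`, `task`, and `mdp` names from the previous sketch.
// --------------------------------------------------------------------------------------
/*
task.setProduceSchedulers(true);
std::unique_ptr<storm::modelchecker::CheckResult> result = checker.check(env, task);
auto& quantitative = result->asExplicitQuantitativeCheckResult<double>();
if (quantitative.hasScheduler()) {
    storm::storage::Scheduler<double> const& scheduler = quantitative.getScheduler();
    // Inspect the action chosen by the scheduler in the (unique) initial state.
    uint64_t initialState = *mdp.getInitialStates().begin();
    auto const& choice = scheduler.getChoice(initialState);
    (void)choice;
}
*/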
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeGloballyProbabilities(
    Environment const& env, CheckTask<storm::logic::GloballyFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, pathFormula.getSubformula());
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(std::move(ret.scheduler));
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeHOAPathProbabilities(
    Environment const& env, CheckTask<storm::logic::HOAPathFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented automata-props with intervals");
    }
        return this->check(env, formula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLTLProbabilities(
    Environment const& env, CheckTask<storm::logic::PathFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented LTL with intervals");
    }
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
        return this->check(env, formula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(
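// Property example (illustrative): an LTL path formula such as
//   Pmax=? [ G F "goal" ]
// is dispatched to computeLTLProbabilities and checked via a product with a deterministic
// automaton (see the LTL helper used above); this typically requires a Storm build with
// LTL model checking support enabled. The label "goal" is hypothetical.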
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeConditionalProbabilities(
    Environment const& env, CheckTask<storm::logic::ConditionalFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    STORM_LOG_THROW(this->getModel().getInitialStates().hasUniqueSetBit(), storm::exceptions::InvalidPropertyException,
                    "Cannot compute conditional probabilities on MDPs with more than one initial state.");
                    "Conditional probabilities can only be computed for the initial states of the model.");
                    "Illegal conditional probability formula.");
                    "Illegal conditional probability formula.");
    if constexpr (std::is_same_v<storm::Interval, ValueType>) {
        throw exceptions::NotImplementedException() << "Conditional Probabilities are not supported with interval models";
    }
        this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(),
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeCumulativeRewards(
    Environment const& env, CheckTask<storm::logic::CumulativeRewardFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    if constexpr (std::is_same_v<storm::Interval, ValueType>) {
        throw exceptions::NotImplementedException() << "Multi-reward bounded is not supported with interval models";
    }
                    "Checking reward bounded cumulative reward formulas can only be done for the initial states of the model.");
                    "Checking reward bounded cumulative reward formulas is not supported if reward accumulations are given.");
    auto formula = std::make_shared<storm::logic::RewardOperatorFormula>(checkTask.getFormula().asSharedPointer(),
                                                                         checkTask.getRewardModel(), opInfo);
    STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound.");
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeInstantaneousRewards(
    Environment const& env, CheckTask<storm::logic::InstantaneousRewardFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound.");
        rewardPathFormula.getBound<uint64_t>());
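// Property examples (illustrative): typical reward properties dispatched to the two
// routines above, in Storm's PRISM-style property syntax (the reward model name "rounds"
// is hypothetical):
//   R{"rounds"}max=? [ C<=20 ]   // expected reward accumulated within 20 steps
//   R{"rounds"}min=? [ I=20 ]    // expected instantaneous reward at exactly step 20
// The max/min annotation supplies the optimization direction required on a
// nondeterministic model, matching the guards above.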
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeReachabilityRewards(
    Environment const& env, CheckTask<storm::logic::EventuallyFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, eventuallyFormula.getSubformula());
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(std::move(ret.scheduler));
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeReachabilityTimes(
    Environment const& env, CheckTask<storm::logic::EventuallyFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, eventuallyFormula.getSubformula());
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(std::move(ret.scheduler));
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeTotalRewards(
    Environment const& env, CheckTask<storm::logic::TotalRewardFormula, SolutionType> const& checkTask) {
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(std::move(ret.scheduler));
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageProbabilities(
    Environment const& env, CheckTask<storm::logic::StateFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented LRA probabilities with intervals");
    }
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(
    Environment const& env, CheckTask<storm::logic::LongRunAverageRewardFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented lra with intervals");
    }
                    "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
        result->asExplicitQuantitativeCheckResult<SolutionType>().setScheduler(
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::checkMultiObjectiveFormula(
    Environment const& env, CheckTask<storm::logic::MultiObjectiveFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented multi-objective with intervals");
    }
template<class SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::checkLexObjectiveFormula(
    Environment const& env, CheckTask<storm::logic::MultiObjectiveFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented lexicographic model checking with intervals");
    }
        return this->check(env, formula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::checkQuantileFormula(
    Environment const& env, CheckTask<storm::logic::QuantileFormula, SolutionType> const& checkTask) {
    if constexpr (std::is_same_v<ValueType, storm::Interval>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented quantile formulas with intervals");
    }
                    "Computing quantiles is only supported for the initial states of a model.");
    STORM_LOG_THROW(this->getModel().getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::InvalidOperationException,
                    "Quantiles not supported on models with multiple initial states.");
    uint64_t initialState = *this->getModel().getInitialStates().begin();
    if (res.size() == 1 && res.front().size() == 1) {