namespace modelchecker {

template<typename SparseDtmcModelType>
SparseDtmcPrctlModelChecker<SparseDtmcModelType>::SparseDtmcPrctlModelChecker(SparseDtmcModelType const& model) {
    // ...
}
template<typename SparseDtmcModelType>
bool SparseDtmcPrctlModelChecker<SparseDtmcModelType>::canHandleStatic(CheckTask<storm::logic::Formula, SolutionType> const& checkTask,
                                                                       bool* requiresSingleInitialState) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        // ...
    }
    // ...
    if (requiresSingleInitialState) {
        *requiresSingleInitialState = true;
    }
    // ...
}
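// Illustrative aside, not part of the checker source: this file repeatedly guards interval-valued
// models with `if constexpr (storm::IsIntervalType<ValueType>)`, so the unsupported branch is
// discarded at compile time and only instantiations with an interval ValueType can reach the
// NotImplementedException. A minimal standalone sketch of the same pattern, with the hypothetical
// trait is_interval_v standing in for storm::IsIntervalType:
#include <stdexcept>
#include <vector>

struct Interval {
    double lower;
    double upper;
};

template<typename T>
inline constexpr bool is_interval_v = false;
template<>
inline constexpr bool is_interval_v<Interval> = true;

template<typename ValueType>
std::vector<ValueType> computeSomething(std::vector<ValueType> const& input) {
    if constexpr (is_interval_v<ValueType>) {
        // Only instantiated for interval-valued models; point-valued instantiations never compile this branch.
        throw std::logic_error("not yet implemented for interval models");
    } else {
        return input;  // point-valued case proceeds as usual
    }
}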
template<typename SparseDtmcModelType>
bool SparseDtmcPrctlModelChecker<SparseDtmcModelType>::canHandle(CheckTask<storm::logic::Formula, SolutionType> const& checkTask) const {
    bool requiresSingleInitialState = false;
    if (canHandleStatic(checkTask, &requiresSingleInitialState)) {
        return !requiresSingleInitialState || this->getModel().getInitialStates().getNumberOfSetBits() == 1;
    }
    return false;
}
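// Illustrative aside, not part of the checker source: a sketch of how a caller might guard a query
// with canHandle() before dispatching it to this checker. It assumes a double-valued sparse DTMC and
// a CheckTask built elsewhere; the include paths are the usual Storm header locations and should be
// treated as assumptions.
#include <memory>

#include "storm/environment/Environment.h"
#include "storm/modelchecker/CheckTask.h"
#include "storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.h"
#include "storm/modelchecker/results/CheckResult.h"
#include "storm/models/sparse/Dtmc.h"

std::unique_ptr<storm::modelchecker::CheckResult> tryCheck(storm::Environment const& env,
                                                           storm::models::sparse::Dtmc<double> const& dtmc,
                                                           storm::modelchecker::CheckTask<storm::logic::Formula, double> const& checkTask) {
    storm::modelchecker::SparseDtmcPrctlModelChecker<storm::models::sparse::Dtmc<double>> checker(dtmc);
    if (!checker.canHandle(checkTask)) {
        return nullptr;  // e.g., a quantile query on a model with several initial states
    }
    return checker.check(env, checkTask);
}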
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeBoundedUntilProbabilities(
    Environment const& env, CheckTask<storm::logic::BoundedUntilFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented bounded until with intervals");
    } else {
        storm::logic::BoundedUntilFormula const& pathFormula = checkTask.getFormula();
        // Reward-bounded / multi-dimensional bounds are rephrased as a reward-bounded operator formula:
        STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidOperationException,
                        "Checking non-trivial bounded until probabilities can only be computed for the initial states of the model.");
        // ...
        auto formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(checkTask.getFormula().asSharedPointer(), opInfo);
        auto values = /* helper */ computeRewardBoundedValues(env, this->getModel(), formula);
        // ...

        // Plain step-bounded case:
        STORM_LOG_THROW(pathFormula.hasUpperBound(), storm::exceptions::InvalidPropertyException,
                        "Formula needs to have (a single) upper step bound.");
        // ... ("Formula lower step bound must be discrete/integral.")
        // ... ("Formula needs to have discrete upper step bound.")
        std::unique_ptr<CheckResult> leftResultPointer = this->check(env, pathFormula.getLeftSubformula());
        std::unique_ptr<CheckResult> rightResultPointer = this->check(env, pathFormula.getRightSubformula());
        // ...
        std::vector<ValueType> numericResult = /* ... */;
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeNextProbabilities(
    Environment const& env, CheckTask<storm::logic::NextFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented next probabilities with intervals");
    } else {
        storm::logic::NextFormula const& pathFormula = checkTask.getFormula();
        std::unique_ptr<CheckResult> subResultPointer = this->check(env, pathFormula.getSubformula());
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeUntilProbabilities(
    Environment const& env, CheckTask<storm::logic::UntilFormula, SolutionType> const& checkTask) {
    storm::logic::UntilFormula const& pathFormula = checkTask.getFormula();
    if (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(checkTask.isUncertaintyResolutionModeSet(), storm::exceptions::InvalidSettingsException,
                        "Uncertainty resolution mode must be set for uncertain (interval) models.");
        STORM_LOG_THROW(/* ... */, storm::exceptions::InvalidSettingsException,
                        "Uncertainty resolution modes robust or cooperative not allowed if optimization direction is not stated explicitly.");
        STORM_LOG_THROW(this->getModel().getTransitionMatrix().hasOnlyPositiveEntries(), storm::exceptions::InvalidSettingsException,
                        "Computing until probabilities on uncertain model requires graph-preservation.");
    }
    std::unique_ptr<CheckResult> leftResultPointer = this->check(env, pathFormula.getLeftSubformula());
    std::unique_ptr<CheckResult> rightResultPointer = this->check(env, pathFormula.getRightSubformula());
    // ...
    std::vector<SolutionType> numericResult = /* ... */;
    // ...
}
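// Illustrative aside, not part of the checker source: the actual work above is delegated to a static
// helper that receives the transition matrix, its backward edges and the phi/psi state sets. As a
// self-contained sketch of what such a helper computes, the toy code below approximates P(phi U psi)
// on a small dense DTMC by value iteration: the value is 1 for psi-states, 0 for states violating phi,
// and otherwise the expected value of the successors. (Storm additionally precomputes the states with
// probability 0 or 1 and solves a linear system on the remaining states.)
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> untilProbabilities(std::vector<std::vector<double>> const& P, std::vector<bool> const& phi,
                                       std::vector<bool> const& psi, double epsilon = 1e-8) {
    std::size_t n = P.size();
    std::vector<double> x(n, 0.0), next(n, 0.0);
    double diff = 1.0;
    while (diff > epsilon) {
        diff = 0.0;
        for (std::size_t s = 0; s < n; ++s) {
            if (psi[s]) {
                next[s] = 1.0;
            } else if (!phi[s]) {
                next[s] = 0.0;
            } else {
                double v = 0.0;
                for (std::size_t t = 0; t < n; ++t) {
                    v += P[s][t] * x[t];
                }
                next[s] = v;
            }
            diff = std::max(diff, std::fabs(next[s] - x[s]));
        }
        x.swap(next);
    }
    return x;
}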
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeGloballyProbabilities(
    Environment const& env, CheckTask<storm::logic::GloballyFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented globally probabilities with intervals");
    } else {
        storm::logic::GloballyFormula const& pathFormula = checkTask.getFormula();
        std::unique_ptr<CheckResult> subResultPointer = this->check(env, pathFormula.getSubformula());
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeHOAPathProbabilities(
    Environment const& env, CheckTask<storm::logic::HOAPathFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented automata-props with intervals");
    } else {
        // ...
        // Callback that evaluates the automaton's atomic propositions on this model:
        auto formulaChecker = [&](storm::logic::Formula const& formula) {
            return this->check(env, formula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
        };
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLTLProbabilities(
    Environment const& env, CheckTask<storm::logic::PathFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented LTL with interval models");
    } else {
        // ...
        // Callback with which the LTL helper reduces each atomic subformula to its satisfying states:
        auto formulaChecker = [&](storm::logic::Formula const& formula) {
            return this->check(env, formula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
        };
        // ...
    }
}
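// Illustrative aside, not part of the checker source: the LTL helper receives the checker's own
// check() routine as a callback, so that every atomic (state) subformula of the LTL formula can be
// reduced to a bit vector of satisfying states before the automaton product is built. A minimal
// sketch of that shape, with hypothetical stand-in types instead of Storm's:
#include <functional>
#include <map>
#include <string>
#include <vector>

using StateSet = std::vector<bool>;                                // stand-in for storm::storage::BitVector
using FormulaRef = std::string;                                    // stand-in for storm::logic::Formula const&
using CheckFormulaCallback = std::function<StateSet(FormulaRef)>;  // same role as the helper's callback

std::map<std::string, StateSet> computeApSatisfactionSets(std::map<std::string, FormulaRef> const& extracted,
                                                          CheckFormulaCallback const& formulaChecker) {
    std::map<std::string, StateSet> apSets;
    for (auto const& [ap, subformula] : extracted) {
        apSets[ap] = formulaChecker(subformula);  // one model-checking call per atomic proposition
    }
    return apSets;
}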
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeCumulativeRewards(
    Environment const& env, CheckTask<storm::logic::CumulativeRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented cumulative rewards with intervals");
    } else {
        storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula();
        // Reward-bounded case:
        STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidOperationException,
                        "Checking non-trivial bounded until probabilities can only be computed for the initial states of the model.");
        STORM_LOG_THROW(/* ... */, /* ... */,
                        "Checking reward bounded cumulative reward formulas is not supported if reward accumulations are given.");
        // ...
        auto formula = std::make_shared<storm::logic::RewardOperatorFormula>(checkTask.getFormula().asSharedPointer(), checkTask.getRewardModel(), opInfo);
        auto values = /* helper */ computeRewardBoundedValues(env, this->getModel(), formula);
        // ...

        // Step-bounded case:
        STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound.");
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
// The discounted-reward routines are rejected outright for parametric value types:
// ...
    STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Discounted properties are not implemented for parametric models.");
// ...
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeDiscountedCumulativeRewards(
    Environment const& env, CheckTask<storm::logic::DiscountedCumulativeRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented discounted cumulative rewards with intervals");
    } else {
        storm::logic::DiscountedCumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula();
        STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound.");
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
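// Illustrative aside, not part of the checker source: the static helper behind this routine takes a
// step bound and a discount factor. As a self-contained sketch of the underlying recursion (assuming
// state rewards collected in each of the first stepBound states visited), the expected discounted
// cumulative reward satisfies
//     v_0 = 0,   v_{k+1}(s) = r(s) + gamma * sum_t P(s,t) * v_k(t),
// which the toy code below evaluates directly on dense matrices:
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<double> discountedCumulativeRewards(std::vector<std::vector<double>> const& P, std::vector<double> const& stateRewards,
                                                uint64_t stepBound, double gamma) {
    std::size_t n = P.size();
    std::vector<double> v(n, 0.0);
    for (uint64_t k = 0; k < stepBound; ++k) {
        std::vector<double> next(n, 0.0);
        for (std::size_t s = 0; s < n; ++s) {
            double expectation = 0.0;
            for (std::size_t t = 0; t < n; ++t) {
                expectation += P[s][t] * v[t];
            }
            next[s] = stateRewards[s] + gamma * expectation;
        }
        v.swap(next);
    }
    return v;
}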
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeInstantaneousRewards(
    Environment const& env, CheckTask<storm::logic::InstantaneousRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented instantaneous rewards with intervals");
    } else {
        storm::logic::InstantaneousRewardFormula const& rewardPathFormula = checkTask.getFormula();
        STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound.");
        std::vector<SolutionType> numericResult =
            /* helper */ computeInstantaneousRewards(env, /* ... */, rewardPathFormula.getBound<uint64_t>());
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeReachabilityRewards(
    Environment const& env, CheckTask<storm::logic::EventuallyFormula, SolutionType> const& checkTask) {
    storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula();
    if (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(checkTask.isUncertaintyResolutionModeSet(), storm::exceptions::InvalidSettingsException,
                        "Uncertainty resolution mode must be set for uncertain (interval) models.");
        STORM_LOG_THROW(/* ... */, storm::exceptions::InvalidSettingsException,
                        "Uncertainty resolution modes robust or cooperative not allowed if optimization direction is not stated explicitly.");
        STORM_LOG_THROW(this->getModel().getTransitionMatrix().hasOnlyPositiveEntries(), storm::exceptions::InvalidSettingsException,
                        "Computing rewards on uncertain model requires graph-preservation.");
    }
    std::unique_ptr<CheckResult> subResultPointer = this->check(env, eventuallyFormula.getSubformula());
    // ...
    std::vector<SolutionType> numericResult = /* ... */;
    // ...
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeReachabilityTimes(
    Environment const& env, CheckTask<storm::logic::EventuallyFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented reachability times with intervals");
    } else {
        storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula();
        std::unique_ptr<CheckResult> subResultPointer = this->check(env, eventuallyFormula.getSubformula());
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeTotalRewards(
    Environment const& env, CheckTask<storm::logic::TotalRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented total rewards with intervals");
    } else {
        // ...
        std::vector<SolutionType> numericResult = /* helper */ computeTotalRewards(
            env, /* ... */,
            this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isQualitativeSet(), checkTask.getHint());
        // ...
    }
}
// Discounted total rewards are likewise rejected for parametric value types:
// ...
    STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Discounted properties are not implemented for parametric models.");
// ...
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeDiscountedTotalRewards(
    Environment const& env, CheckTask<storm::logic::DiscountedTotalRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented discounted total rewards with intervals");
    } else {
        // ...
        std::vector<SolutionType> numericResult = /* helper */ computeDiscountedTotalRewards(
            env, /* ... */,
            this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isQualitativeSet(), discountFactor, checkTask.getHint());
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageProbabilities(
    Environment const& env, CheckTask<storm::logic::StateFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented LRA probabilities with intervals");
    } else {
        storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
        std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageRewards(
    Environment const& env, CheckTask<storm::logic::LongRunAverageRewardFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented lra with intervals");
    } else {
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeConditionalProbabilities(
    Environment const& env, CheckTask<storm::logic::ConditionalFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented conditional probabilities with intervals");
    } else {
        // ...
        STORM_LOG_THROW(/* ... */, /* ... */, "Illegal conditional probability formula.");
        STORM_LOG_THROW(/* ... */, /* ... */, "Illegal conditional probability formula.");
        // ...
        std::vector<SolutionType> numericResult = /* ... */;
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeConditionalRewards(
    Environment const& env, CheckTask<storm::logic::ConditionalFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented conditional rewards with intervals");
    } else {
        // ...
        STORM_LOG_THROW(/* ... */, /* ... */, "Illegal conditional probability formula.");
        STORM_LOG_THROW(/* ... */, /* ... */, "Illegal conditional probability formula.");
        // ...
        std::vector<SolutionType> numericResult = /* helper */ computeConditionalRewards(
            env, /* ... */,
            this->getModel().getBackwardTransitions(),
            /* ... */);
        // ...
    }
}
// Quantile queries are likewise rejected for parametric value types:
// ...
    STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Quantiles for parametric models are not implemented.");
// ...
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::checkQuantileFormula(
    Environment const& env, CheckTask<storm::logic::QuantileFormula, SolutionType> const& checkTask) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented quantile formulas with intervals");
    } else {
        STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidOperationException,
                        "Computing quantiles is only supported for the initial states of a model.");
        STORM_LOG_THROW(this->getModel().getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::InvalidOperationException,
                        "Quantiles not supported on models with multiple initial states.");
        uint64_t initialState = *this->getModel().getInitialStates().begin();
        // ... (res holds the quantile values computed by the helper)
        if (res.size() == 1 && res.front().size() == 1) {
            // ...
        }
        // ...
    }
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeSteadyStateDistribution(Environment const& env) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented steady state distributions with intervals");
    } else {
        // ...
        std::vector<SolutionType> result;
        auto const& initialStates = this->getModel().getInitialStates();
        uint64_t numInitStates = initialStates.getNumberOfSetBits();
        if (numInitStates == 1) {
            // ...
        } else {
            STORM_LOG_WARN("Multiple initial states found. A uniform distribution over initial states is assumed.");
            ValueType initProb = storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(numInitStates);
            // ... (each state's initial weight is then
            //      initialStates.get(stateIndex) ? initProb : storm::utility::zero<ValueType>() )
        }
        // ...
    }
}
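// Illustrative aside, not part of the checker source: computeSteadyStateDistribution starts from an
// initial distribution (uniform over the initial states, as warned above) and asks for the long run
// distribution over all states. For a single aperiodic recurrent class this is the fixed point
// pi = pi * P, which the toy code below approximates by power iteration; the real helper also handles
// periodicity and multiple bottom SCCs.
#include <cstddef>
#include <vector>

std::vector<double> steadyStateDistribution(std::vector<std::vector<double>> const& P, std::vector<double> dist,
                                            std::size_t iterations = 10000) {
    std::size_t n = P.size();
    for (std::size_t k = 0; k < iterations; ++k) {
        std::vector<double> next(n, 0.0);
        for (std::size_t s = 0; s < n; ++s) {
            for (std::size_t t = 0; t < n; ++t) {
                next[t] += dist[s] * P[s][t];  // push the current probability mass one step forward
            }
        }
        dist.swap(next);
    }
    return dist;
}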
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeExpectedVisitingTimes(Environment const& env) {
    if constexpr (storm::IsIntervalType<ValueType>) {
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "We have not yet implemented expected visiting times with intervals");
    } else {
        // ...
        std::vector<SolutionType> result;
        auto const& initialStates = this->getModel().getInitialStates();
        uint64_t numInitStates = initialStates.getNumberOfSetBits();
        STORM_LOG_THROW(numInitStates > 0, storm::exceptions::InvalidOperationException, "No initial states given. Cannot compute expected visiting times.");
        STORM_LOG_WARN_COND(numInitStates == 1, "Multiple initial states found. A uniform distribution over initial states is assumed.");
        // ...
    }
}