Storm
A Modern Probabilistic Model Checker
SparseMultiObjectivePreprocessor.cpp
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h"

#include <algorithm>
#include <set>

#include "storm/environment/modelchecker/ModelCheckerEnvironment.h"
#include "storm/environment/modelchecker/MultiObjectiveModelCheckerEnvironment.h"
#include "storm/logic/Formulas.h"
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
#include "storm/models/sparse/MarkovAutomaton.h"
#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/transformer/MemoryIncorporation.h"
#include "storm/transformer/SubsystemBuilder.h"
#include "storm/utility/FilteredRewardModel.h"
#include "storm/utility/graph.h"
#include "storm/utility/macros.h"
#include "storm/utility/vector.h"

#include "storm/exceptions/InvalidOperationException.h"
#include "storm/exceptions/InvalidPropertyException.h"
#include "storm/exceptions/NotImplementedException.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
namespace preprocessing {

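// Entry point of the preprocessing. The original model is first extended with the scheduler memory required by the
// (optional) scheduler restriction, then states that are irrelevant for all objectives are made absorbing, and each
// objective is rewritten into an operator formula that the subsequent multi-objective model checking steps can handle.
// Finally, reward models that are no longer referenced are dropped and the result object is assembled.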
template<typename SparseModelType>
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::preprocess(
    Environment const& env, SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula) {
    std::shared_ptr<SparseModelType> model;

    // Incorporate the necessary memory
    if (env.modelchecker().multi().isSchedulerRestrictionSet()) {
        auto const& schedRestr = env.modelchecker().multi().getSchedulerRestriction();
        if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::GoalMemory) {
            model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateGoalMemory(originalModel, originalFormula.getSubformulas());
        } else if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::Arbitrary && schedRestr.getMemoryStates() > 1) {
            model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateFullMemory(originalModel, schedRestr.getMemoryStates());
        } else if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::Counter && schedRestr.getMemoryStates() > 1) {
            model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateCountingMemory(originalModel, schedRestr.getMemoryStates());
        } else if (schedRestr.isPositional()) {
            model = std::make_shared<SparseModelType>(originalModel);
        } else {
            STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "The given scheduler restriction has not been implemented.");
        }
    } else {
        model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateGoalMemory(originalModel, originalFormula.getSubformulas());
    }

    // Remove states that are irrelevant for all properties (e.g. because they are only reachable via goal states)
    boost::optional<std::string> deadlockLabel;
    removeIrrelevantStates(model, deadlockLabel, originalFormula);

    PreprocessorData data(model);
    data.deadlockLabel = deadlockLabel;

    // Invoke preprocessing on the individual objectives
    for (auto const& subFormula : originalFormula.getSubformulas()) {
        STORM_LOG_INFO("Preprocessing objective " << *subFormula << ".");
        data.objectives.push_back(std::make_shared<Objective<ValueType>>());
        data.objectives.back()->originalFormula = subFormula;
        data.finiteRewardCheckObjectives.resize(data.objectives.size(), false);
        data.upperResultBoundObjectives.resize(data.objectives.size(), false);
        STORM_LOG_THROW(data.objectives.back()->originalFormula->isOperatorFormula(), storm::exceptions::InvalidPropertyException,
                        "Could not preprocess the subformula " << *subFormula << " of " << originalFormula << " because it is not supported");
        preprocessOperatorFormula(data.objectives.back()->originalFormula->asOperatorFormula(), data);
    }

    // Remove reward models that are not needed anymore
    std::set<std::string> relevantRewardModels;
    for (auto const& obj : data.objectives) {
        obj->formula->gatherReferencedRewardModels(relevantRewardModels);
    }
    data.model->restrictRewardModels(relevantRewardModels);

    // Build the actual result
    return buildResult(originalModel, originalFormula, data);
}

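// A state is 'only reachable via phi' if every path from an initial state to it has to pass through a phi-state.
// The callers below use this to identify states whose future behavior can no longer influence the respective
// objective, so that they can be made absorbing.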
template<typename SparseModelType>
storm::storage::BitVector getOnlyReachableViaPhi(SparseModelType const& model, storm::storage::BitVector const& phi) {
    // Get the complement of the states that are reachable without visiting phi
    auto result =
        storm::utility::graph::getReachableStates(model.getTransitionMatrix(), model.getInitialStates(), ~phi, storm::storage::BitVector(phi.size(), false));
    result.complement();
    assert(phi.isSubsetOf(result));
    return result;
}

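// For every objective, removeIrrelevantStates computes a set of states that can be made absorbing without changing
// the value of that objective. The intersection of these sets over all objectives is then made absorbing, and the
// subsystem builder removes the parts of the model that become unreachable; a fresh deadlock label is recorded if
// the transformation introduces one.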
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::removeIrrelevantStates(std::shared_ptr<SparseModelType>& model,
                                                                               boost::optional<std::string>& deadlockLabel,
                                                                               storm::logic::MultiObjectiveFormula const& originalFormula) {
    storm::storage::BitVector absorbingStates(model->getNumberOfStates(), true);

    storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*model);
    storm::storage::SparseMatrix<ValueType> backwardTransitions = model->getBackwardTransitions();

    for (auto const& opFormula : originalFormula.getSubformulas()) {
        // Compute a set of states from which we can make any subset absorbing without affecting this subformula
        storm::storage::BitVector absorbingStatesForSubformula;
        STORM_LOG_THROW(opFormula->isOperatorFormula(), storm::exceptions::InvalidPropertyException,
                        "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported");
        auto const& pathFormula = opFormula->asOperatorFormula().getSubformula();
        if (opFormula->isProbabilityOperatorFormula()) {
            if (pathFormula.isUntilFormula()) {
                auto lhs = mc.check(pathFormula.asUntilFormula().getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                auto rhs = mc.check(pathFormula.asUntilFormula().getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
                absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
            } else if (pathFormula.isBoundedUntilFormula()) {
                if (pathFormula.asBoundedUntilFormula().hasMultiDimensionalSubformulas()) {
                    absorbingStatesForSubformula = storm::storage::BitVector(model->getNumberOfStates(), true);
                    storm::storage::BitVector absorbingStatesForSubSubformula;
                    for (uint64_t i = 0; i < pathFormula.asBoundedUntilFormula().getDimension(); ++i) {
                        auto subPathFormula = pathFormula.asBoundedUntilFormula().restrictToDimension(i);
                        auto lhs =
                            mc.check(pathFormula.asBoundedUntilFormula().getLeftSubformula(i))->asExplicitQualitativeCheckResult().getTruthValuesVector();
                        auto rhs =
                            mc.check(pathFormula.asBoundedUntilFormula().getRightSubformula(i))->asExplicitQualitativeCheckResult().getTruthValuesVector();
                        absorbingStatesForSubSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
                        if (pathFormula.asBoundedUntilFormula().hasLowerBound(i)) {
                            absorbingStatesForSubSubformula |= getOnlyReachableViaPhi(*model, ~lhs);
                        } else {
                            absorbingStatesForSubSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
                        }
                        absorbingStatesForSubformula &= absorbingStatesForSubSubformula;
                    }
                } else {
                    auto lhs = mc.check(pathFormula.asBoundedUntilFormula().getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                    auto rhs = mc.check(pathFormula.asBoundedUntilFormula().getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                    absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
                    if (pathFormula.asBoundedUntilFormula().hasLowerBound()) {
                        absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs);
                    } else {
                        absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
                    }
                }
            } else if (pathFormula.isGloballyFormula()) {
                auto phi = mc.check(pathFormula.asGloballyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                auto notPhi = ~phi;
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, phi, notPhi);
                absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, notPhi);
            } else if (pathFormula.isEventuallyFormula()) {
                auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, ~phi, phi);
                absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, phi);
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
            }
        } else if (opFormula->isRewardOperatorFormula()) {
            auto const& baseRewardModel = opFormula->asRewardOperatorFormula().hasRewardModelName()
                                              ? model->getRewardModel(opFormula->asRewardOperatorFormula().getRewardModelName())
                                              : model->getUniqueRewardModel();
            if (pathFormula.isEventuallyFormula()) {
                auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asEventuallyFormula());
                storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
                // Make states that can not reach a state with non-zero reward absorbing
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
                auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                // Make states that reach phi with prob 1 while only visiting states with reward 0 absorbing
                absorbingStatesForSubformula |= storm::utility::graph::performProb1A(
                    model->getTransitionMatrix(), model->getTransitionMatrix().getRowGroupIndices(), backwardTransitions, statesWithoutReward, phi);
                // Make states that are only reachable via phi absorbing
                absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, phi);
            } else if (pathFormula.isCumulativeRewardFormula()) {
                auto rewardModel =
                    storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asCumulativeRewardFormula());
                storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
            } else if (pathFormula.isTotalRewardFormula()) {
                auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asTotalRewardFormula());
                storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
                absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
            } else if (pathFormula.isLongRunAverageRewardFormula()) {
                auto rewardModel =
                    storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asLongRunAverageRewardFormula());
                storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
                // Compute Sat(Forall F (Forall G "statesWithoutReward"))
                auto forallGloballyStatesWithoutReward = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
                absorbingStatesForSubformula =
                    storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions,
                                                         storm::storage::BitVector(model->getNumberOfStates(), true), forallGloballyStatesWithoutReward);
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
            }
        } else if (opFormula->isTimeOperatorFormula()) {
            if (pathFormula.isEventuallyFormula()) {
                auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
                absorbingStatesForSubformula = getOnlyReachableViaPhi(*model, phi);
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
            }
        } else if (opFormula->isLongRunAverageOperatorFormula()) {
            auto lraStates = mc.check(pathFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
            // Compute Sat(Forall F (Forall G not "lraStates"))
            auto forallGloballyNotLraStates = storm::utility::graph::performProb0A(backwardTransitions, ~lraStates, lraStates);
            absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(),
                                                                                backwardTransitions, ~lraStates, forallGloballyNotLraStates);
        } else {
            STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
                            "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported");
        }
        absorbingStates &= absorbingStatesForSubformula;
        if (absorbingStates.empty()) {
            break;
        }
    }

    if (!absorbingStates.empty()) {
        // We can make the states absorbing and delete unreachable states.
        storm::storage::BitVector subsystemActions(model->getNumberOfChoices(), true);
        for (auto absorbingState : absorbingStates) {
            for (uint64_t action = model->getTransitionMatrix().getRowGroupIndices()[absorbingState];
                 action < model->getTransitionMatrix().getRowGroupIndices()[absorbingState + 1]; ++action) {
                subsystemActions.set(action, false);
            }
        }
        storm::transformer::SubsystemBuilderOptions options;
        options.fixDeadlocks = true;
        auto const& submodel =
            storm::transformer::buildSubsystem(*model, storm::storage::BitVector(model->getNumberOfStates(), true), subsystemActions, false, options);
        STORM_LOG_INFO("Making states absorbing reduced the state space from " << model->getNumberOfStates() << " to " << submodel.model->getNumberOfStates()
                                                                               << ".");
        model = submodel.model->template as<SparseModelType>();
        deadlockLabel = submodel.deadlockLabel;
    }
}

template<typename SparseModelType>
SparseMultiObjectivePreprocessor<SparseModelType>::PreprocessorData::PreprocessorData(std::shared_ptr<SparseModelType> model) : model(model) {
    // The rewardModelNamePrefix should not be a prefix of a reward model name of the given model to ensure uniqueness of new reward model names
    rewardModelNamePrefix = "obj";
    while (true) {
        bool prefixIsUnique = true;
        for (auto const& rewardModels : model->getRewardModels()) {
            if (rewardModelNamePrefix.size() <= rewardModels.first.size()) {
                if (std::mismatch(rewardModelNamePrefix.begin(), rewardModelNamePrefix.end(), rewardModels.first.begin()).first ==
                    rewardModelNamePrefix.end()) {
                    prefixIsUnique = false;
                    rewardModelNamePrefix = "_" + rewardModelNamePrefix;
                    break;
                }
            }
        }
        if (prefixIsUnique) {
            break;
        }
    }
}

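// Extracts threshold and optimization direction from an operator formula. If the objective is analyzed via its
// complementary event (e.g. a globally formula that is rewritten as an until formula for the negated subformula),
// a probability threshold t is replaced by 1 - t, the comparison is flipped, and the optimization direction is
// inverted accordingly.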
storm::logic::OperatorInformation getOperatorInformation(storm::logic::OperatorFormula const& formula, bool considerComplementaryEvent) {
    storm::logic::OperatorInformation opInfo;
    if (formula.hasBound()) {
        opInfo.bound = formula.getBound();
        // Invert the bound (if necessary)
        if (considerComplementaryEvent) {
            opInfo.bound->threshold = opInfo.bound->threshold.getManager().rational(storm::utility::one<storm::RationalNumber>()) - opInfo.bound->threshold;
            switch (opInfo.bound->comparisonType) {
                case storm::logic::ComparisonType::Greater:
                    opInfo.bound->comparisonType = storm::logic::ComparisonType::Less;
                    break;
                case storm::logic::ComparisonType::GreaterEqual:
                    opInfo.bound->comparisonType = storm::logic::ComparisonType::LessEqual;
                    break;
                case storm::logic::ComparisonType::Less:
                    opInfo.bound->comparisonType = storm::logic::ComparisonType::Greater;
                    break;
                case storm::logic::ComparisonType::LessEqual:
                    opInfo.bound->comparisonType = storm::logic::ComparisonType::GreaterEqual;
                    break;
                default:
                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << formula << " has unexpected comparison type");
            }
        }
        if (storm::logic::isLowerBound(opInfo.bound->comparisonType)) {
            opInfo.optimalityType = storm::solver::OptimizationDirection::Maximize;
        } else {
            opInfo.optimalityType = storm::solver::OptimizationDirection::Minimize;
        }
        STORM_LOG_WARN_COND(!formula.hasOptimalityType(),
                            "Optimization direction of formula " << formula << " ignored as the formula also specifies a threshold.");
    } else if (formula.hasOptimalityType()) {
        opInfo.optimalityType = formula.getOptimalityType();
        // Invert the optimality type (if necessary)
        if (considerComplementaryEvent) {
            opInfo.optimalityType = storm::solver::invert(opInfo.optimalityType.get());
        }
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Objective " << formula << " does not specify whether to minimize or maximize");
    }
    return opInfo;
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data) {
    Objective<ValueType>& objective = *data.objectives.back();

    // Check whether the complementary event is considered
    objective.considersComplementaryEvent = formula.getSubformula().isGloballyFormula();

    // Extract the operator information from the formula and potentially invert it for the complementary event
    storm::logic::OperatorInformation opInfo = getOperatorInformation(formula, objective.considersComplementaryEvent);

    if (formula.isProbabilityOperatorFormula()) {
        preprocessProbabilityOperatorFormula(formula.asProbabilityOperatorFormula(), opInfo, data);
    } else if (formula.isRewardOperatorFormula()) {
        preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data);
    } else if (formula.isTimeOperatorFormula()) {
        preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data);
    } else if (formula.isLongRunAverageOperatorFormula()) {
        preprocessLongRunAverageOperatorFormula(formula.asLongRunAverageOperatorFormula(), opInfo, data);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported");
    }
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula,
                                                                                             storm::logic::OperatorInformation const& opInfo,
                                                                                             PreprocessorData& data) {
    // Probabilities are between zero and one
    data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
    data.objectives.back()->upperResultBound = storm::utility::one<ValueType>();

    if (formula.getSubformula().isUntilFormula()) {
        preprocessUntilFormula(formula.getSubformula().asUntilFormula(), opInfo, data);
    } else if (formula.getSubformula().isBoundedUntilFormula()) {
        preprocessBoundedUntilFormula(formula.getSubformula().asBoundedUntilFormula(), opInfo, data);
    } else if (formula.getSubformula().isGloballyFormula()) {
        preprocessGloballyFormula(formula.getSubformula().asGloballyFormula(), opInfo, data);
    } else if (formula.getSubformula().isEventuallyFormula()) {
        preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
    }
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula,
                                                                                        storm::logic::OperatorInformation const& opInfo,
                                                                                        PreprocessorData& data) {
    std::string rewardModelName;
    if (formula.hasRewardModelName()) {
        rewardModelName = formula.getRewardModelName();
        STORM_LOG_THROW(data.model->hasRewardModel(rewardModelName), storm::exceptions::InvalidPropertyException,
                        "The reward model specified by formula " << formula << " does not exist in the model");
    } else {
        // We have to assert that a unique reward model exists, and we need to find its name.
        // However, we might have added auxiliary reward models for other objectives which we have to filter out here.
        auto prefixOf = [](std::string const& left, std::string const& right) {
            return std::mismatch(left.begin(), left.end(), right.begin()).first == left.end();
        };
        bool uniqueRewardModelFound = false;
        for (auto const& rewModel : data.model->getRewardModels()) {
            if (prefixOf(data.rewardModelNamePrefix, rewModel.first)) {
                // Skip auxiliary reward model
                continue;
            }
            STORM_LOG_THROW(!uniqueRewardModelFound, storm::exceptions::InvalidOperationException,
                            "The formula " << formula << " does not specify a reward model name and the reward model is not unique.");
            uniqueRewardModelFound = true;
            rewardModelName = rewModel.first;
        }
        STORM_LOG_THROW(uniqueRewardModelFound, storm::exceptions::InvalidOperationException,
                        "The formula " << formula << " refers to an unnamed reward model but no reward model has been defined.");
    }

    data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();

    if (formula.getSubformula().isEventuallyFormula()) {
        preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data, rewardModelName);
    } else if (formula.getSubformula().isCumulativeRewardFormula()) {
        preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName);
    } else if (formula.getSubformula().isTotalRewardFormula()) {
        preprocessTotalRewardFormula(formula.getSubformula().asTotalRewardFormula(), opInfo, data, rewardModelName);
    } else if (formula.getSubformula().isLongRunAverageRewardFormula()) {
        preprocessLongRunAverageRewardFormula(formula.getSubformula().asLongRunAverageRewardFormula(), opInfo, data, rewardModelName);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
    }
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula,
                                                                                      storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
    data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();

    if (formula.getSubformula().isEventuallyFormula()) {
        preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
    }
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula,
                                                                                                storm::logic::OperatorInformation const& opInfo,
                                                                                                PreprocessorData& data) {
    data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
    data.objectives.back()->upperResultBound = storm::utility::one<ValueType>();

    // Convert to a long run average reward formula
    // Create and add the new formula
    std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
    auto lraRewardFormula = std::make_shared<storm::logic::LongRunAverageRewardFormula>();
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(lraRewardFormula, rewardModelName, opInfo);

    // Create and add the new reward model that only gives one reward for goal states
    storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model);
    storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
    std::vector<typename SparseModelType::ValueType> lraRewards(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>());
    storm::utility::vector::setVectorValues(lraRewards, subFormulaResult, storm::utility::one<typename SparseModelType::ValueType>());
    data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(lraRewards)));
}

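// An (unbounded) until formula is rewritten into an expected total (or cumulative) reward objective whenever this is
// sound: each action of a relevant state collects a reward equal to the probability of moving directly into a state
// that satisfies the right subformula. The rewrite is only applied if no state that is 'decided' (i.e. violates the
// left or satisfies the right subformula) can reach an undecided, initial-state-reachable state again; otherwise the
// objective is kept as a plain probability operator formula.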
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessUntilFormula(storm::logic::UntilFormula const& formula,
                                                                               storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
                                                                               std::shared_ptr<storm::logic::Formula const> subformula) {
    // Try to transform the formula to expected total (or cumulative) rewards

    storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model);
    storm::storage::BitVector rightSubformulaResult = mc.check(formula.getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
    // Check if the formula is already satisfied in the initial state because then the transformation to expected rewards will fail.
    // TODO: Handle this case more properly
    STORM_LOG_THROW((data.model->getInitialStates() & rightSubformulaResult).empty(), storm::exceptions::NotImplementedException,
                    "The Probability for the objective "
                        << *data.objectives.back()->originalFormula
                        << " is always one as the rhs of the until formula is true in the initial state. This (trivial) case is currently not implemented.");

    // Whenever a state that violates the left subformula or satisfies the right subformula is reached, the objective is 'decided', i.e., no more reward should
    // be collected from there
    storm::storage::BitVector notLeftOrRight = mc.check(formula.getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
    notLeftOrRight.complement();
    notLeftOrRight |= rightSubformulaResult;

    // Get the states that are reachable from a notLeftOrRight state
    storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false);
    storm::storage::BitVector reachableFromGoal =
        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), notLeftOrRight, allStates, noStates);
    // Get the states that are reachable from an initial state, stopping at the states reachable from goal
    storm::storage::BitVector reachableFromInit =
        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), ~notLeftOrRight, reachableFromGoal);
    // Exclude the actual notLeftOrRight states from the states that are reachable from init
    reachableFromInit &= ~notLeftOrRight;
    // If we can reach a state that is reachable from goal, but which is not a goal state, it means that the transformation to expected rewards is not possible.
    if ((reachableFromInit & reachableFromGoal).empty()) {
        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " is transformed to an expected total/cumulative reward property.");
        // Transform to expected total rewards:
        // build stateAction reward vector that gives (one*transitionProbability) reward whenever a transition leads from a reachableFromInit state to a
        // goalState
        std::vector<typename SparseModelType::ValueType> objectiveRewards(data.model->getTransitionMatrix().getRowCount(),
                                                                          storm::utility::zero<typename SparseModelType::ValueType>());
        for (auto state : reachableFromInit) {
            for (uint_fast64_t row = data.model->getTransitionMatrix().getRowGroupIndices()[state];
                 row < data.model->getTransitionMatrix().getRowGroupIndices()[state + 1]; ++row) {
                objectiveRewards[row] = data.model->getTransitionMatrix().getConstrainedRowSum(row, rightSubformulaResult);
            }
        }
        std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
        data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::nullopt, std::move(objectiveRewards)));
        if (subformula == nullptr) {
            subformula = std::make_shared<storm::logic::TotalRewardFormula>();
        }
        data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(subformula, rewardModelName, opInfo);
    } else {
        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property.");
        data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo);
    }
}

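// Bounded until formulas without a (non-trivial) lower bound are reduced to the unbounded case: an upper time bound
// becomes a cumulative reward formula, and a missing upper bound yields a total reward formula. Multi-dimensional
// bounds and reward-bounded formulas are kept as probability operator formulas.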
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula,
                                                                                      storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
    // Check how to handle this query
    if (formula.isMultiDimensional() || formula.getTimeBoundReference().isRewardBound()) {
        STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " is not transformed to an expected cumulative reward property.");
        data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo);
    } else if (!formula.hasLowerBound() || (!formula.isLowerBoundStrict() && storm::utility::isZero(formula.template getLowerBound<storm::RationalNumber>()))) {
        std::shared_ptr<storm::logic::Formula const> subformula;
        if (!formula.hasUpperBound()) {
            // The formula is actually unbounded
            subformula = std::make_shared<storm::logic::TotalRewardFormula>();
        } else {
            STORM_LOG_THROW(!data.model->isOfType(storm::models::ModelType::MarkovAutomaton) || formula.getTimeBoundReference().isTimeBound(),
                            storm::exceptions::InvalidPropertyException,
                            "Bounded until formulas for Markov Automata are only allowed when time bounds are considered.");
            storm::logic::TimeBound bound(formula.isUpperBoundStrict(), formula.getUpperBound());
            subformula = std::make_shared<storm::logic::CumulativeRewardFormula>(bound, formula.getTimeBoundReference());
        }
        preprocessUntilFormula(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()),
                               opInfo, data, subformula);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Property " << formula << " is not supported");
    }
}

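// Globally formulas are handled via the complementary event: G phi is rewritten as an until formula targeting !phi,
// and getOperatorInformation has already inverted the threshold and optimization direction for this objective.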
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessGloballyFormula(storm::logic::GloballyFormula const& formula,
                                                                                  storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
    // The formula is transformed to an until formula for the complementary event.
    auto negatedSubformula = std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not,
                                                                                      formula.getSubformula().asSharedPointer());

    preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), negatedSubformula), opInfo, data);
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula,
                                                                                    storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
                                                                                    boost::optional<std::string> const& optionalRewardModelName) {
    if (formula.isReachabilityProbabilityFormula()) {
        preprocessUntilFormula(
            *std::make_shared<storm::logic::UntilFormula>(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), opInfo, data);
        return;
    }

    // Analyze the subformula
    storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model);
    storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();

    // Get the states that are reachable from a goal state
    storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false);
    storm::storage::BitVector reachableFromGoal =
        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), subFormulaResult, allStates, noStates);
    // Get the states that are reachable from an initial state, stopping at the states reachable from goal
    storm::storage::BitVector reachableFromInit =
        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), allStates, reachableFromGoal);
    // Exclude the actual goal states from the states that are reachable from an initial state
    reachableFromInit &= ~subFormulaResult;
    // If we can reach a state that is reachable from goal but which is not a goal state, it means that the transformation to expected total rewards is not
    // possible.
    if ((reachableFromInit & reachableFromGoal).empty()) {
        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " is transformed to an expected total reward property.");
        // Transform to expected total rewards:

        std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
        auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>();
        data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, rewardModelName, opInfo);

        if (formula.isReachabilityRewardFormula()) {
            // build stateAction reward vector that only gives reward for states that are reachable from init
            assert(optionalRewardModelName.is_initialized());
            auto objectiveRewards =
                storm::utility::createFilteredRewardModel(data.model->getRewardModel(optionalRewardModelName.get()), data.model->isDiscreteTimeModel(), formula)
                    .extract();
            // get rid of potential transition rewards
            objectiveRewards.reduceToStateBasedRewards(data.model->getTransitionMatrix(), false);
            if (objectiveRewards.hasStateRewards()) {
                storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), reachableFromGoal,
                                                        storm::utility::zero<typename SparseModelType::ValueType>());
            }
            if (objectiveRewards.hasStateActionRewards()) {
                for (auto state : reachableFromGoal) {
                    std::fill_n(objectiveRewards.getStateActionRewardVector().begin() + data.model->getTransitionMatrix().getRowGroupIndices()[state],
                                data.model->getTransitionMatrix().getRowGroupSize(state), storm::utility::zero<typename SparseModelType::ValueType>());
                }
            }
            data.model->addRewardModel(rewardModelName, std::move(objectiveRewards));
        } else if (formula.isReachabilityTimeFormula()) {
            // build state reward vector that only gives reward for relevant states
            std::vector<typename SparseModelType::ValueType> timeRewards(data.model->getNumberOfStates(),
                                                                         storm::utility::zero<typename SparseModelType::ValueType>());
            if (data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) {
                storm::utility::vector::setVectorValues(
                    timeRewards,
                    dynamic_cast<storm::models::sparse::MarkovAutomaton<typename SparseModelType::ValueType> const&>(*data.model).getMarkovianStates() &
                        reachableFromInit,
                    storm::utility::one<typename SparseModelType::ValueType>());
            } else {
                storm::utility::vector::setVectorValues(timeRewards, reachableFromInit, storm::utility::one<typename SparseModelType::ValueType>());
            }
            data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards)));
        } else {
            STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
                            "The formula " << formula << " neither considers reachability probabilities nor reachability rewards "
                                           << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "")
                                           << ". This is not supported.");
        }
    } else {
        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property.");
        if (formula.isReachabilityRewardFormula()) {
            // TODO: this probably needs some better treatment regarding schedulers that do not reach the goal state almost surely
            assert(optionalRewardModelName.is_initialized());
            if (data.deadlockLabel) {
                // We made some states absorbing and created a new deadlock state. To make sure that this deadlock state gets value zero, we add it to the set
                // of goal states of the formula.
                std::shared_ptr<storm::logic::Formula const> newSubSubformula =
                    std::make_shared<storm::logic::AtomicLabelFormula const>(data.deadlockLabel.get());
                std::shared_ptr<storm::logic::Formula const> newSubformula = std::make_shared<storm::logic::BinaryBooleanStateFormula const>(
                    storm::logic::BinaryBooleanStateFormula::OperatorType::Or, formula.getSubformula().asSharedPointer(), newSubSubformula);
                boost::optional<storm::logic::RewardAccumulation> newRewardAccumulation;
                if (formula.hasRewardAccumulation()) {
                    newRewardAccumulation = formula.getRewardAccumulation();
                }
                std::shared_ptr<storm::logic::Formula const> newFormula =
                    std::make_shared<storm::logic::EventuallyFormula const>(newSubformula, formula.getContext(), newRewardAccumulation);
                data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, optionalRewardModelName.get(), opInfo);
            } else {
                data.objectives.back()->formula =
                    std::make_shared<storm::logic::RewardOperatorFormula>(formula.asSharedPointer(), optionalRewardModelName.get(), opInfo);
            }
        } else if (formula.isReachabilityTimeFormula()) {
            // Reduce to reachability rewards so that time formulas do not have to be treated separately later.
            std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
            std::shared_ptr<storm::logic::Formula const> newSubformula = formula.getSubformula().asSharedPointer();
            if (data.deadlockLabel) {
                // We made some states absorbing and created a new deadlock state. To make sure that this deadlock state gets value zero, we add it to the set
                // of goal states of the formula.
                std::shared_ptr<storm::logic::Formula const> newSubSubformula =
                    std::make_shared<storm::logic::AtomicLabelFormula const>(data.deadlockLabel.get());
                newSubformula = std::make_shared<storm::logic::BinaryBooleanStateFormula const>(storm::logic::BinaryBooleanStateFormula::OperatorType::Or,
                                                                                                formula.getSubformula().asSharedPointer(), newSubSubformula);
            }
            auto newFormula = std::make_shared<storm::logic::EventuallyFormula>(newSubformula, storm::logic::FormulaContext::Reward);
            data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, rewardModelName, opInfo);
            std::vector<typename SparseModelType::ValueType> timeRewards;
            if (data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) {
                timeRewards.assign(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>());
                storm::utility::vector::setVectorValues(
                    timeRewards,
                    dynamic_cast<storm::models::sparse::MarkovAutomaton<typename SparseModelType::ValueType> const&>(*data.model).getMarkovianStates(),
                    storm::utility::one<typename SparseModelType::ValueType>());
            } else {
                timeRewards.assign(data.model->getNumberOfStates(), storm::utility::one<typename SparseModelType::ValueType>());
            }
            data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards)));
        } else {
            STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
                            "The formula " << formula << " neither considers reachability probabilities nor reachability rewards "
                                           << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "")
                                           << ". This is not supported.");
        }
    }
    data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
}

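// Cumulative reward objectives are only supported for MDPs here. Reward accumulations are stripped both from the
// formula itself and from any reward-bounded dimensions; if stripping changes a reward model, a filtered copy is
// added under a fresh name. If all bounds are reward bounds, finiteness of the objective has to be checked later.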
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula,
                                                                                          storm::logic::OperatorInformation const& opInfo,
                                                                                          PreprocessorData& data,
                                                                                          boost::optional<std::string> const& optionalRewardModelName) {
    STORM_LOG_THROW(data.model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::InvalidPropertyException,
                    "Cumulative reward formulas are not supported for the given model type.");
    std::string rewardModelName = optionalRewardModelName.get();
    // Strip away potential RewardAccumulations in the formula itself but also in reward bounds
    auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
    if (filteredRewards.isDifferentFromUnfilteredModel()) {
        rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
        data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract()));
    }

    std::vector<storm::logic::TimeBoundReference> newTimeBoundReferences;
    bool onlyRewardBounds = true;
    for (uint64_t i = 0; i < formula.getDimension(); ++i) {
        auto oldTbr = formula.getTimeBoundReference(i);
        if (oldTbr.isRewardBound()) {
            if (oldTbr.hasRewardAccumulation()) {
                auto filteredBoundRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(oldTbr.getRewardName()),
                                                                                      oldTbr.getRewardAccumulation(), data.model->isDiscreteTimeModel());
                if (filteredBoundRewards.isDifferentFromUnfilteredModel()) {
                    std::string freshRewardModelName =
                        data.rewardModelNamePrefix + std::to_string(data.objectives.size()) + std::string("_" + std::to_string(i));
                    data.model->addRewardModel(freshRewardModelName, std::move(filteredBoundRewards.extract()));
                    newTimeBoundReferences.emplace_back(freshRewardModelName);
                } else {
                    // Strip away the reward accumulation
                    newTimeBoundReferences.emplace_back(oldTbr.getRewardName());
                }
            } else {
                newTimeBoundReferences.push_back(oldTbr);
            }
        } else {
            onlyRewardBounds = false;
            newTimeBoundReferences.push_back(oldTbr);
        }
    }

    auto newFormula = std::make_shared<storm::logic::CumulativeRewardFormula>(formula.getBounds(), newTimeBoundReferences);
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, rewardModelName, opInfo);

    if (onlyRewardBounds) {
        data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
    }
}

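// Total reward objectives only need potential reward accumulations stripped; since the expected total reward may be
// infinite under some schedulers, the objective is additionally flagged for a finiteness check.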
template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTotalRewardFormula(storm::logic::TotalRewardFormula const& formula,
                                                                                     storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
                                                                                     boost::optional<std::string> const& optionalRewardModelName) {
    std::string rewardModelName = optionalRewardModelName.get();
    auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
    if (filteredRewards.isDifferentFromUnfilteredModel()) {
        rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
        data.model->addRewardModel(rewardModelName, filteredRewards.extract());
    }
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(formula.stripRewardAccumulation(), rewardModelName, opInfo);
    data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageRewardFormula(storm::logic::LongRunAverageRewardFormula const& formula,
                                                                                              storm::logic::OperatorInformation const& opInfo,
                                                                                              PreprocessorData& data,
                                                                                              boost::optional<std::string> const& optionalRewardModelName) {
    std::string rewardModelName = optionalRewardModelName.get();
    auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
    if (filteredRewards.isDifferentFromUnfilteredModel()) {
        rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
        data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract()));
    }
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(formula.stripRewardAccumulation(), rewardModelName, opInfo);
}

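// Assembles the preprocessor result: the (possibly transformed) model, the rewritten objectives, the query type that
// is derived from the number of objectives carrying a threshold, and the set of objectives whose reward may still be
// infinite and therefore has to be checked by later stages.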
template<typename SparseModelType>
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::buildResult(
    SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data) {
    ReturnType result(originalFormula, originalModel);
    auto backwardTransitions = data.model->getBackwardTransitions();
    result.preprocessedModel = data.model;

    for (auto& obj : data.objectives) {
        result.objectives.push_back(std::move(*obj));
    }
    result.queryType = getQueryType(result.objectives);
    result.maybeInfiniteRewardObjectives = std::move(data.finiteRewardCheckObjectives);

    return result;
}

template<typename SparseModelType>
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType::QueryType SparseMultiObjectivePreprocessor<SparseModelType>::getQueryType(
    std::vector<Objective<ValueType>> const& objectives) {
    uint_fast64_t numOfObjectivesWithThreshold = 0;
    for (auto& obj : objectives) {
        if (obj.formula->hasBound()) {
            ++numOfObjectivesWithThreshold;
        }
    }
    if (numOfObjectivesWithThreshold == objectives.size()) {
        return ReturnType::QueryType::Achievability;
    } else if (numOfObjectivesWithThreshold + 1 == objectives.size()) {
        // Note: We do not want to consider a Pareto query when the total number of objectives is one.
        return ReturnType::QueryType::Quantitative;
    } else if (numOfObjectivesWithThreshold == 0) {
        return ReturnType::QueryType::Pareto;
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
                        "Invalid Multi-objective query: The number of objectives with a threshold should be either 0 (Pareto query), "
                        "#objectives - 1 (quantitative query), or #objectives (achievability query).");
    }
}

template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>;
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<double>>;

template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<storm::RationalNumber>>;
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>;
}  // namespace preprocessing
}  // namespace multiobjective
}  // namespace modelchecker
}  // namespace storm
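
The preprocessor is normally driven by Storm's multi-objective model checking front end, but it can also be invoked directly on an already constructed model. A minimal sketch, assuming an MDP and a parsed MultiObjectiveFormula are provided by the caller (the helper preprocessAndReport is illustrative and not part of Storm):

#include <iostream>

#include "storm/environment/Environment.h"
#include "storm/logic/MultiObjectiveFormula.h"
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h"
#include "storm/models/sparse/Mdp.h"

using MdpPreprocessor = storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>;

// Illustrative helper (not part of Storm): runs the preprocessing and prints a short summary of the outcome.
void preprocessAndReport(storm::models::sparse::Mdp<double> const& mdp, storm::logic::MultiObjectiveFormula const& formula) {
    storm::Environment env;  // default environment: no scheduler restriction is set, so goal memory is incorporated
    auto result = MdpPreprocessor::preprocess(env, mdp, formula);
    std::cout << "Preprocessed model: " << result.preprocessedModel->getNumberOfStates() << " states, " << result.objectives.size() << " objectives.\n";
}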