Storm 1.10.0.1
A Modern Probabilistic Model Checker
Loading...
Searching...
No Matches
SparseMultiObjectivePreprocessor.cpp
Go to the documentation of this file.
2
3#include <algorithm>
4#include <set>
5
20#include "storm/utility/graph.h"
23
27
28namespace storm {
29namespace modelchecker {
30namespace multiobjective {
31namespace preprocessing {
32
33template<typename SparseModelType>
// Entry point of the multi-objective preprocessing pipeline. It (1) incorporates
// scheduler memory into the model as dictated by the environment's scheduler
// restriction, (2) removes states irrelevant to all objectives (only when no
// scheduler is produced), (3) preprocesses each objective formula, (4) drops reward
// models no longer referenced, and (5) assembles the preprocessor result.
// NOTE(review): the signature line (doxygen line 34) and several call lines (40, 45,
// 48, 64, 66) are missing from this excerpt — confirm against the repository source.
 35 Environment const& env, SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, bool produceScheduler) {
 36 std::shared_ptr<SparseModelType> model;
// Reverse data is only populated when schedulers must be mapped back to the
// original (pre-memory-product) model.
 37 std::optional<storm::storage::SparseModelMemoryProductReverseData> memoryIncorporationReverseData;
 38
 39 // Incorporate the necessary memory
 41 auto const& schedRestr = env.modelchecker().multi().getSchedulerRestriction();
 42 if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::GoalMemory) {
 43 if (produceScheduler) {
// Producing a scheduler: keep the reverse data of the memory product so the
// scheduler can be translated back later.
 44 std::tie(model, memoryIncorporationReverseData) =
 46 originalFormula.getSubformulas());
 47 } else {
 49 }
 50 } else if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::Arbitrary && schedRestr.getMemoryStates() > 1) {
// Scheduler export is not supported for full (arbitrary) memory incorporation.
 51 STORM_LOG_THROW(!produceScheduler, storm::exceptions::NotImplementedException, "Cannot produce schedulers for the provided memory pattern.");
 52 model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateFullMemory(originalModel, schedRestr.getMemoryStates());
 53 } else if (schedRestr.getMemoryPattern() == storm::storage::SchedulerClass::MemoryPattern::Counter && schedRestr.getMemoryStates() > 1) {
 54 STORM_LOG_THROW(!produceScheduler, storm::exceptions::NotImplementedException, "Cannot produce schedulers for the provided memory pattern.");
 55 model = storm::transformer::MemoryIncorporation<SparseModelType>::incorporateCountingMemory(originalModel, schedRestr.getMemoryStates());
 56 } else if (schedRestr.isPositional()) {
// Positional schedulers need no extra memory: work on a plain copy of the model.
 57 model = std::make_shared<SparseModelType>(originalModel);
 58 } else {
 59 STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "The given scheduler restriction has not been implemented.");
 60 }
 61 } else {
 62 if (produceScheduler) {
 63 std::tie(model, memoryIncorporationReverseData) =
 65 } else {
 67 }
 68 }
 69
 70 // Remove states that are irrelevant for all properties (e.g. because they are only reachable via goal states
 71 boost::optional<std::string> deadlockLabel;
 72 if (!produceScheduler) {
 73 // When producing schedulers, removing irrelevant states requires additional bookkeeping.
 74 removeIrrelevantStates(model, deadlockLabel, originalFormula);
 75 }
 76
 77 PreprocessorData data(model);
 78 data.deadlockLabel = deadlockLabel;
 79 data.memoryIncorporationReverseData = std::move(memoryIncorporationReverseData);
 80
 81 // Invoke preprocessing on the individual objectives
 82 for (auto const& subFormula : originalFormula.getSubformulas()) {
 83 STORM_LOG_INFO("Preprocessing objective " << *subFormula << ".");
 84 data.objectives.push_back(std::make_shared<Objective<ValueType>>());
 85 data.objectives.back()->originalFormula = subFormula;
// Bookkeeping bit vectors grow in lockstep with the objective list.
 86 data.finiteRewardCheckObjectives.resize(data.objectives.size(), false);
 87 data.upperResultBoundObjectives.resize(data.objectives.size(), false);
 88 STORM_LOG_THROW(data.objectives.back()->originalFormula->isOperatorFormula(), storm::exceptions::InvalidPropertyException,
 89 "Could not preprocess the subformula " << *subFormula << " of " << originalFormula << " because it is not supported");
 90 preprocessOperatorFormula(data.objectives.back()->originalFormula->asOperatorFormula(), data);
 91 }
 92
 93 // Remove reward models that are not needed anymore
 94 std::set<std::string> relevantRewardModels;
 95 for (auto const& obj : data.objectives) {
// Note: 'formula' here is the preprocessed formula, which may reference
// auxiliary reward models created during preprocessing.
 96 obj->formula->gatherReferencedRewardModels(relevantRewardModels);
 97 }
 98 data.model->restrictRewardModels(relevantRewardModels);
 99
 100 // Build the actual result
 101 return buildResult(originalModel, originalFormula, data);
 102}
103
104template<typename SparseModelType>
// Computes the set of states that can only be reached by passing through a phi
// state (phi states themselves are included — see the assertion below).
// NOTE(review): the signature line (doxygen line 105) is missing from this excerpt;
// presumably the parameters are the model and the BitVector phi — confirm upstream.
 106 // Get the complement of the states that are reachable without visiting phi
 107 auto result =
// Forward reachability from the initial states restricted to non-phi states
// (~phi) with no target states (the all-false BitVector).
 108 storm::utility::graph::getReachableStates(model.getTransitionMatrix(), model.getInitialStates(), ~phi, storm::storage::BitVector(phi.size(), false));
 109 result.complement();
// Sanity check: every phi state trivially belongs to the result.
 110 assert(phi.isSubsetOf(result));
 111 return result;
 112}
113
114template<typename SparseModelType>
// For each objective, computes a set of states that can be made absorbing without
// changing the objective's value; the intersection over all objectives is then made
// absorbing and unreachable states are removed via a subsystem construction.
// NOTE(review): doxygen lines 119-120 (the declaration of the model checker 'mc')
// and 243 (the SubsystemBuilder options declaration) are missing from this excerpt.
115void SparseMultiObjectivePreprocessor<SparseModelType>::removeIrrelevantStates(std::shared_ptr<SparseModelType>& model,
 116 boost::optional<std::string>& deadlockLabel,
 117 storm::logic::MultiObjectiveFormula const& originalFormula) {
// Start with all states; each subformula intersects this set down.
 118 storm::storage::BitVector absorbingStates(model->getNumberOfStates(), true);
 121 storm::storage::SparseMatrix<ValueType> backwardTransitions = model->getBackwardTransitions();
 122
 123 for (auto const& opFormula : originalFormula.getSubformulas()) {
 124 // Compute a set of states from which we can make any subset absorbing without affecting this subformula
 125 storm::storage::BitVector absorbingStatesForSubformula;
 126 STORM_LOG_THROW(opFormula->isOperatorFormula(), storm::exceptions::InvalidPropertyException,
 127 "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported");
 128 auto const& pathFormula = opFormula->asOperatorFormula().getSubformula();
 129 if (opFormula->isProbabilityOperatorFormula()) {
 130 if (pathFormula.isUntilFormula()) {
 131 auto lhs = mc.check(pathFormula.asUntilFormula().getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 132 auto rhs = mc.check(pathFormula.asUntilFormula().getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
// States that under all schedulers never reach rhs while staying in lhs
// have probability 0 and can safely be made absorbing.
 133 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
// Also: states only reachable via a 'decided' state (!lhs or rhs).
 134 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
 135 } else if (pathFormula.isBoundedUntilFormula()) {
 136 if (pathFormula.asBoundedUntilFormula().hasMultiDimensionalSubformulas()) {
// Multi-dimensional bound: intersect the per-dimension absorbing sets.
 137 absorbingStatesForSubformula = storm::storage::BitVector(model->getNumberOfStates(), true);
 138 storm::storage::BitVector absorbingStatesForSubSubformula;
 139 for (uint64_t i = 0; i < pathFormula.asBoundedUntilFormula().getDimension(); ++i) {
 140 auto subPathFormula = pathFormula.asBoundedUntilFormula().restrictToDimension(i);
 141 auto lhs =
 142 mc.check(pathFormula.asBoundedUntilFormula().getLeftSubformula(i))->asExplicitQualitativeCheckResult().getTruthValuesVector();
 143 auto rhs =
 144 mc.check(pathFormula.asBoundedUntilFormula().getRightSubformula(i))->asExplicitQualitativeCheckResult().getTruthValuesVector();
 145 absorbingStatesForSubSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
 146 if (pathFormula.asBoundedUntilFormula().hasLowerBound(i)) {
// With a lower bound, reaching rhs early does not decide the
// objective, so only !lhs states count as 'decided'.
 147 absorbingStatesForSubSubformula |= getOnlyReachableViaPhi(*model, ~lhs);
 148 } else {
 149 absorbingStatesForSubSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
 150 }
 151 absorbingStatesForSubformula &= absorbingStatesForSubSubformula;
 152 }
 153 } else {
 154 auto lhs = mc.check(pathFormula.asBoundedUntilFormula().getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 155 auto rhs = mc.check(pathFormula.asBoundedUntilFormula().getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 156 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, lhs, rhs);
 157 if (pathFormula.asBoundedUntilFormula().hasLowerBound()) {
 158 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs);
 159 } else {
 160 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, ~lhs | rhs);
 161 }
 162 }
 163 } else if (pathFormula.isGloballyFormula()) {
 164 auto phi = mc.check(pathFormula.asGloballyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 165 auto notPhi = ~phi;
// G phi is violated exactly when a !phi state is reached; prob-0 of
// reaching !phi (within phi) means the objective is already decided.
 166 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, phi, notPhi);
 167 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, notPhi);
 168 } else if (pathFormula.isEventuallyFormula()) {
 169 auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 170 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, ~phi, phi);
 171 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, phi);
 172 } else {
 173 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
 174 }
 175 } else if (opFormula->isRewardOperatorFormula()) {
 176 auto const& baseRewardModel = opFormula->asRewardOperatorFormula().hasRewardModelName()
 177 ? model->getRewardModel(opFormula->asRewardOperatorFormula().getRewardModelName())
 178 : model->getUniqueRewardModel();
 179 if (pathFormula.isEventuallyFormula()) {
 180 auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asEventuallyFormula());
 181 storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
 182 // Make states that can not reach a state with non-zero reward absorbing
 183 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
 184 auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 185 // Make states that reach phi with prob 1 while only visiting states with reward 0 absorbing
 186 absorbingStatesForSubformula |= storm::utility::graph::performProb1A(
 187 model->getTransitionMatrix(), model->getTransitionMatrix().getRowGroupIndices(), backwardTransitions, statesWithoutReward, phi);
 188 // Make states that are only reachable via phi absorbing
 189 absorbingStatesForSubformula |= getOnlyReachableViaPhi(*model, phi);
 190 } else if (pathFormula.isCumulativeRewardFormula()) {
 191 auto rewardModel =
 192 storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asCumulativeRewardFormula());
 193 storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
 194 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
 195 } else if (pathFormula.isTotalRewardFormula()) {
 196 auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asTotalRewardFormula());
 197 storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
 198 absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
 199 } else if (pathFormula.isLongRunAverageRewardFormula()) {
 200 auto rewardModel =
 201 storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asLongRunAverageRewardFormula());
 202 storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
 203 // Compute Sat(Forall F (Forall G "statesWithoutReward"))
 204 auto forallGloballyStatesWithoutReward = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
 205 absorbingStatesForSubformula =
 206 storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions,
 207 storm::storage::BitVector(model->getNumberOfStates(), true), forallGloballyStatesWithoutReward);
 208 } else {
 209 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
 210 }
 211 } else if (opFormula->isTimeOperatorFormula()) {
 212 if (pathFormula.isEventuallyFormula()) {
 213 auto phi = mc.check(pathFormula.asEventuallyFormula().getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 214 absorbingStatesForSubformula = getOnlyReachableViaPhi(*model, phi);
 215 } else {
 216 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
 217 }
 218 } else if (opFormula->isLongRunAverageOperatorFormula()) {
 219 auto lraStates = mc.check(pathFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
 220 // Compute Sat(Forall F (Forall G not "lraStates"))
 221 auto forallGloballyNotLraStates = storm::utility::graph::performProb0A(backwardTransitions, ~lraStates, lraStates);
 222 absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(),
 223 backwardTransitions, ~lraStates, forallGloballyNotLraStates);
 224 } else {
 225 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
 226 "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported");
 227 }
// Only states safe for ALL objectives may be made absorbing.
 228 absorbingStates &= absorbingStatesForSubformula;
 229 if (absorbingStates.empty()) {
// Early exit: no state can be made absorbing for every objective.
 230 break;
 231 }
 232 }
 233
 234 if (!absorbingStates.empty()) {
 235 // We can make the states absorbing and delete unreachable states.
 236 storm::storage::BitVector subsystemActions(model->getNumberOfChoices(), true);
// Disable every action of an absorbing state; fixDeadlocks (below) then
// inserts a self-loop, effectively making the state absorbing.
 237 for (auto absorbingState : absorbingStates) {
 238 for (uint64_t action = model->getTransitionMatrix().getRowGroupIndices()[absorbingState];
 239 action < model->getTransitionMatrix().getRowGroupIndices()[absorbingState + 1]; ++action) {
 240 subsystemActions.set(action, false);
 241 }
 242 }
 244 options.fixDeadlocks = true;
 245 auto const& submodel =
 246 storm::transformer::buildSubsystem(*model, storm::storage::BitVector(model->getNumberOfStates(), true), subsystemActions, false, options);
 247 STORM_LOG_INFO("Making states absorbing reduced the state space from " << model->getNumberOfStates() << " to " << submodel.model->getNumberOfStates()
 248 << ".");
 249 model = submodel.model->template as<SparseModelType>();
// Remember the label of freshly introduced deadlock (absorbing) states so
// later stages can refer to them.
 250 deadlockLabel = submodel.deadlockLabel;
 251 }
 252}
253
254template<typename SparseModelType>
255SparseMultiObjectivePreprocessor<SparseModelType>::PreprocessorData::PreprocessorData(std::shared_ptr<SparseModelType> model) : model(model) {
256 // The rewardModelNamePrefix should not be a prefix of a reward model name of the given model to ensure uniqueness of new reward model names
257 rewardModelNamePrefix = "obj";
258 while (true) {
259 bool prefixIsUnique = true;
260 for (auto const& rewardModels : model->getRewardModels()) {
261 if (rewardModelNamePrefix.size() <= rewardModels.first.size()) {
262 if (std::mismatch(rewardModelNamePrefix.begin(), rewardModelNamePrefix.end(), rewardModels.first.begin()).first ==
263 rewardModelNamePrefix.end()) {
264 prefixIsUnique = false;
265 rewardModelNamePrefix = "_" + rewardModelNamePrefix;
266 break;
267 }
268 }
269 }
270 if (prefixIsUnique) {
271 break;
272 }
273 }
274}
275
// Extracts the operator information (bound and/or optimization direction) from an
// objective formula. When the complementary event is considered, the threshold is
// replaced by 1 - threshold and the comparison / optimization direction is flipped.
// NOTE(review): the signature lines (doxygen 276-277), the switch-case labels
// (284, 287, 290, 293), a warning macro (305) and the optimality-type inversion
// line (311) are missing from this excerpt — confirm against the repository source.
 278 if (formula.hasBound()) {
 279 opInfo.bound = formula.getBound();
 280 // Invert the bound (if necessary)
 281 if (considerComplementaryEvent) {
// Complement: P(event) = 1 - P(complement), hence threshold := 1 - threshold.
 282 opInfo.bound->threshold = opInfo.bound->threshold.getManager().rational(storm::utility::one<storm::RationalNumber>()) - opInfo.bound->threshold;
// Each comparison type maps to its mirrored counterpart (the case labels
// are missing here; the assignments below show the mirrored targets).
 283 switch (opInfo.bound->comparisonType) {
 285 opInfo.bound->comparisonType = storm::logic::ComparisonType::Less;
 286 break;
 288 opInfo.bound->comparisonType = storm::logic::ComparisonType::LessEqual;
 289 break;
 291 opInfo.bound->comparisonType = storm::logic::ComparisonType::Greater;
 292 break;
 294 opInfo.bound->comparisonType = storm::logic::ComparisonType::GreaterEqual;
 295 break;
 296 default:
 297 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << formula << " has unexpected comparison type");
 298 }
 299 }
// Derive the optimization direction from the bound: lower bounds are checked
// by maximizing, upper bounds by minimizing.
 300 if (storm::logic::isLowerBound(opInfo.bound->comparisonType)) {
 301 opInfo.optimalityType = storm::solver::OptimizationDirection::Maximize;
 302 } else {
 303 opInfo.optimalityType = storm::solver::OptimizationDirection::Minimize;
 304 }
 306 "Optimization direction of formula " << formula << " ignored as the formula also specifies a threshold.");
 307 } else if (formula.hasOptimalityType()) {
 308 opInfo.optimalityType = formula.getOptimalityType();
 309 // Invert the optimality type (if necessary)
 310 if (considerComplementaryEvent) {
 312 }
 313 } else {
// Without either a bound or an explicit direction the objective is ill-posed.
 314 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Objective " << formula << " does not specify whether to minimize or maximize");
 315 }
 316 return opInfo;
 317}
318
319template<typename SparseModelType>
// Dispatches the preprocessing of a single objective to the handler matching its
// operator type (probability / reward / time / long-run average).
// NOTE(review): doxygen line 324 — presumably the assignment of
// objective.considersComplementaryEvent — is missing from this excerpt.
320void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data) {
// The objective for this formula was pushed by the caller; fill it in here.
 321 Objective<ValueType>& objective = *data.objectives.back();
 322
 323 // Check whether the complementary event is considered
 325
 326 // Extract the operator information from the formula and potentially invert it for the complementary event
 327 storm::logic::OperatorInformation opInfo = getOperatorInformation(formula, objective.considersComplementaryEvent);
 328
 329 if (formula.isProbabilityOperatorFormula()) {
 330 preprocessProbabilityOperatorFormula(formula.asProbabilityOperatorFormula(), opInfo, data);
 331 } else if (formula.isRewardOperatorFormula()) {
 332 preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data);
 333 } else if (formula.isTimeOperatorFormula()) {
 334 preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data);
 335 } else if (formula.isLongRunAverageOperatorFormula()) {
 336 preprocessLongRunAverageOperatorFormula(formula.asLongRunAverageOperatorFormula(), opInfo, data);
 337 } else {
 338 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported");
 339 }
 340}
341
342template<typename SparseModelType>
// Preprocesses a P-operator objective: sets the trivial [0, 1] result bounds and
// dispatches on the path subformula (until / bounded until / globally / eventually).
// NOTE(review): doxygen line 344 — presumably the opInfo parameter of the
// signature — is missing from this excerpt.
343void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula,
 345 PreprocessorData& data) {
 346 // Probabilities are between zero and one
 347 data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
 348 data.objectives.back()->upperResultBound = storm::utility::one<ValueType>();
 349
 350 if (formula.getSubformula().isUntilFormula()) {
 351 preprocessUntilFormula(formula.getSubformula().asUntilFormula(), opInfo, data);
 352 } else if (formula.getSubformula().isBoundedUntilFormula()) {
 353 preprocessBoundedUntilFormula(formula.getSubformula().asBoundedUntilFormula(), opInfo, data);
 354 } else if (formula.getSubformula().isGloballyFormula()) {
 355 preprocessGloballyFormula(formula.getSubformula().asGloballyFormula(), opInfo, data);
 356 } else if (formula.getSubformula().isEventuallyFormula()) {
 357 preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data);
 358 } else {
 359 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
 360 }
 361}
362
363template<typename SparseModelType>
// Preprocesses an R-operator objective: resolves the referenced reward model name
// (explicit, or the unique non-auxiliary one), sets the lower result bound to zero,
// and dispatches on the reward path subformula.
// NOTE(review): doxygen line 365 — presumably the opInfo parameter of the
// signature — is missing from this excerpt.
364void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula,
 366 PreprocessorData& data) {
 367 std::string rewardModelName;
 368 if (formula.hasRewardModelName()) {
 369 rewardModelName = formula.getRewardModelName();
 370 STORM_LOG_THROW(data.model->hasRewardModel(rewardModelName), storm::exceptions::InvalidPropertyException,
 371 "The reward model specified by formula " << formula << " does not exist in the model");
 372 } else {
 373 // We have to assert that a unique reward model exists, and we need to find its name.
 374 // However, we might have added auxiliary reward models for other objectives which we have to filter out here.
 375 auto prefixOf = [](std::string const& left, std::string const& right) {
 376 return std::mismatch(left.begin(), left.end(), right.begin()).first == left.end();
 377 };
 378 bool uniqueRewardModelFound = false;
 379 for (auto const& rewModel : data.model->getRewardModels()) {
// Auxiliary models carry the reserved name prefix; ignore them.
 380 if (prefixOf(data.rewardModelNamePrefix, rewModel.first)) {
 381 // Skip auxiliary reward model
 382 continue;
 383 }
// A second non-auxiliary model makes the reference ambiguous.
 384 STORM_LOG_THROW(!uniqueRewardModelFound, storm::exceptions::InvalidOperationException,
 385 "The formula " << formula << " does not specify a reward model name and the reward model is not unique.");
 386 uniqueRewardModelFound = true;
 387 rewardModelName = rewModel.first;
 388 }
 389 STORM_LOG_THROW(uniqueRewardModelFound, storm::exceptions::InvalidOperationException,
 390 "The formula " << formula << " refers to an unnamed reward model but no reward model has been defined.");
 391 }
 392
// Rewards are assumed non-negative here, hence the zero lower bound.
 393 data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
 394
 395 if (formula.getSubformula().isEventuallyFormula()) {
 396 preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data, rewardModelName);
 397 } else if (formula.getSubformula().isCumulativeRewardFormula()) {
 398 preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName);
 399 } else if (formula.getSubformula().isTotalRewardFormula()) {
 400 preprocessTotalRewardFormula(formula.getSubformula().asTotalRewardFormula(), opInfo, data, rewardModelName);
 401 } else if (formula.getSubformula().isLongRunAverageRewardFormula()) {
 402 preprocessLongRunAverageRewardFormula(formula.getSubformula().asLongRunAverageRewardFormula(), opInfo, data, rewardModelName);
 403 } else {
 404 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
 405 }
 406}
407
408template<typename SparseModelType>
409void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula,
410 storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
411 data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
412
413 if (formula.getSubformula().isEventuallyFormula()) {
414 preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data);
415 } else {
416 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
417 }
418}
419
420template<typename SparseModelType>
// Preprocesses an LRA-operator objective by reducing it to a long-run average
// reward objective: a fresh reward model assigns reward 1 exactly to the states
// satisfying the subformula, so LRA probability equals LRA reward.
// NOTE(review): doxygen lines 422 (presumably the opInfo parameter) and 434
// (presumably the declaration of the model checker 'mc') are missing here.
421void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula,
 423 PreprocessorData& data) {
// The LRA probability always lies in [0, 1].
 424 data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
 425 data.objectives.back()->upperResultBound = storm::utility::one<ValueType>();
 426
 427 // Convert to a long run average reward formula
 428 // Create and add the new formula
// The name is unique by construction of rewardModelNamePrefix (see ctor).
 429 std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
 430 auto lraRewardFormula = std::make_shared<storm::logic::LongRunAverageRewardFormula>();
 431 data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(lraRewardFormula, rewardModelName, opInfo);
 432
 433 // Create and add the new reward model that only gives one reward for goal states
 435 storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 436 std::vector<typename SparseModelType::ValueType> lraRewards(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>());
 437 storm::utility::vector::setVectorValues(lraRewards, subFormulaResult, storm::utility::one<typename SparseModelType::ValueType>());
 438 data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(lraRewards)));
 439}
440
441template<typename SparseModelType>
// Preprocesses an (unbounded) until objective. If possible, the probability of
// 'a U b' is rewritten as an expected total (or, via the optional 'subformula'
// parameter, cumulative) reward: a transition earns reward equal to the
// probability of stepping directly into a goal state.
// NOTE(review): doxygen line 447 — presumably the declaration of the model
// checker 'mc' — is missing from this excerpt.
442void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessUntilFormula(storm::logic::UntilFormula const& formula,
 443 storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
 444 std::shared_ptr<storm::logic::Formula const> subformula) {
 445 // Try to transform the formula to expected total (or cumulative) rewards
 446
 448 storm::storage::BitVector rightSubformulaResult = mc.check(formula.getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 449 // Check if the formula is already satisfied in the initial state because then the transformation to expected rewards will fail.
 450 // TODO: Handle this case more properly
 451 STORM_LOG_THROW((data.model->getInitialStates() & rightSubformulaResult).empty(), storm::exceptions::NotImplementedException,
 452 "The Probability for the objective "
 453 << *data.objectives.back()->originalFormula
 454 << " is always one as the rhs of the until formula is true in the initial state. This (trivial) case is currently not implemented.");
 455
 456 // Whenever a state that violates the left subformula or satisfies the right subformula is reached, the objective is 'decided', i.e., no more reward should
 457 // be collected from there
 458 storm::storage::BitVector notLeftOrRight = mc.check(formula.getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
 459 notLeftOrRight.complement();
 460 notLeftOrRight |= rightSubformulaResult;
 461
 462 // Get the states that are reachable from a notLeftOrRight state
 463 storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false);
 464 storm::storage::BitVector reachableFromGoal =
 465 storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), notLeftOrRight, allStates, noStates);
 466 // Get the states that are reachable from an initial state, stopping at the states reachable from goal
 467 storm::storage::BitVector reachableFromInit =
 468 storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), ~notLeftOrRight, reachableFromGoal);
 469 // Exclude the actual notLeftOrRight states from the states that are reachable from init
 470 reachableFromInit &= ~notLeftOrRight;
 471 // If we can reach a state that is reachable from goal, but which is not a goal state, it means that the transformation to expected rewards is not possible.
 472 if ((reachableFromInit & reachableFromGoal).empty()) {
 473 STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " is transformed to an expected total/cumulative reward property.");
 474 // Transform to expected total rewards:
 475 // build stateAction reward vector that gives (one*transitionProbability) reward whenever a transition leads from a reachableFromInit state to a
 476 // goalState
 477 std::vector<typename SparseModelType::ValueType> objectiveRewards(data.model->getTransitionMatrix().getRowCount(),
 478 storm::utility::zero<typename SparseModelType::ValueType>());
 479 for (auto state : reachableFromInit) {
 480 for (uint_fast64_t row = data.model->getTransitionMatrix().getRowGroupIndices()[state];
 481 row < data.model->getTransitionMatrix().getRowGroupIndices()[state + 1]; ++row) {
// Reward of a choice = summed probability of moving into a goal state.
 482 objectiveRewards[row] = data.model->getTransitionMatrix().getConstrainedRowSum(row, rightSubformulaResult);
 483 }
 484 }
 485 std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
 486 data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::nullopt, std::move(objectiveRewards)));
// Default to a total-reward formula; callers (e.g. bounded until) may have
// provided a cumulative-reward subformula instead.
 487 if (subformula == nullptr) {
 488 subformula = std::make_shared<storm::logic::TotalRewardFormula>();
 489 }
 490 data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(subformula, rewardModelName, opInfo);
 491 } else {
// Fall back to keeping the objective as a plain probability operator.
 492 STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property.");
 493 data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo);
 494 }
 495}
496
497template<typename SparseModelType>
// Preprocesses a bounded until objective. Multi-dimensional or reward-bounded
// queries are kept as probability operators; a formula with (at most) a trivial
// lower bound is delegated to the until preprocessing with a total- or
// cumulative-reward target subformula.
// NOTE(review): doxygen lines 510 (the start of a STORM_LOG_THROW, presumably
// guarding Markov automata with non-time bounds) and 516 (the start of the
// preprocessUntilFormula(...) call) are missing from this excerpt.
498void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula,
 499 storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
 500 // Check how to handle this query
 501 if (formula.isMultiDimensional() || formula.getTimeBoundReference().isRewardBound()) {
 502 STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " is not transformed to an expected cumulative reward property.");
 503 data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo);
// A lower bound of zero (non-strict) is equivalent to no lower bound at all.
 504 } else if (!formula.hasLowerBound() || (!formula.isLowerBoundStrict() && storm::utility::isZero(formula.template getLowerBound<storm::RationalNumber>()))) {
 505 std::shared_ptr<storm::logic::Formula const> subformula;
 506 if (!formula.hasUpperBound()) {
 507 // The formula is actually unbounded
 508 subformula = std::make_shared<storm::logic::TotalRewardFormula>();
 509 } else {
 511 storm::exceptions::InvalidPropertyException,
 512 "Bounded until formulas for Markov Automata are only allowed when time bounds are considered.");
 513 storm::logic::TimeBound bound(formula.isUpperBoundStrict(), formula.getUpperBound());
 514 subformula = std::make_shared<storm::logic::CumulativeRewardFormula>(bound, formula.getTimeBoundReference());
 515 }
 517 opInfo, data, subformula);
 518 } else {
// NOTE(review): the message string lacks a space ("...formula << "is not...")
// in the original source; kept verbatim here.
 519 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Property " << formula << "is not supported");
 520 }
 521}
522
523template<typename SparseModelType>
524void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessGloballyFormula(storm::logic::GloballyFormula const& formula,
525 storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
526 // The formula is transformed to an until formula for the complementary event.
527 auto negatedSubformula = std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not,
528 formula.getSubformula().asSharedPointer());
529
530 preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), negatedSubformula), opInfo, data);
531}
532
// Preprocesses an eventually (F ...) formula.
// Reachability probabilities are rewritten as 'true U goal' and delegated to
// preprocessUntilFormula. Reachability rewards / reachability time are transformed into an
// expected total reward objective whenever, from the initial states, no non-goal state that is
// reachable from a goal state can be reached (so rewards collected "after" the goal are
// guaranteed zero); otherwise the original reachability-reward form is kept, possibly extended
// by the deadlock label introduced earlier in preprocessing.
// NOTE(review): this listing stems from generated documentation; a few original source lines
// that carried hyperlinks were dropped during extraction (see the NOTE markers below).
533template<typename SparseModelType>
534void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula,
535                                                                                    storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
536                                                                                    boost::optional<std::string> const& optionalRewardModelName) {
537    if (formula.isReachabilityProbabilityFormula()) {
538        preprocessUntilFormula(
539            *std::make_shared<storm::logic::UntilFormula>(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), opInfo, data);
540        return;
541    }
542
543    // Analyze the subformula
    // NOTE(review): original source line 544 was dropped during extraction; it declared `mc`,
    // the (propositional) model checker used on the next line -- restore from upstream sources.
545    storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
546
547    // Get the states that are reachable from a goal state
548    storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false);
549    storm::storage::BitVector reachableFromGoal =
550        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), subFormulaResult, allStates, noStates);
551    // Get the states that are reachable from an initial state, stopping at the states reachable from goal
552    storm::storage::BitVector reachableFromInit =
553        storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), allStates, reachableFromGoal);
554    // Exclude the actual goal states from the states that are reachable from an initial state
555    reachableFromInit &= ~subFormulaResult;
556    // If we can reach a state that is reachable from goal but which is not a goal state, it means that the transformation to expected total rewards is not
557    // possible.
558    if ((reachableFromInit & reachableFromGoal).empty()) {
559        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " is transformed to an expected total reward property.");
560        // Transform to expected total rewards:
561
562        std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
563        auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>();
564        data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, rewardModelName, opInfo);
565
566        if (formula.isReachabilityRewardFormula()) {
567            // build stateAction reward vector that only gives reward for states that are reachable from init
568            assert(optionalRewardModelName.is_initialized());
569            auto objectiveRewards =
570                storm::utility::createFilteredRewardModel(data.model->getRewardModel(optionalRewardModelName.get()), data.model->isDiscreteTimeModel(), formula)
571                    .extract();
572            // get rid of potential transition rewards
573            objectiveRewards.reduceToStateBasedRewards(data.model->getTransitionMatrix(), false);
574            if (objectiveRewards.hasStateRewards()) {
                // Zero out rewards for everything reachable from a goal state so the total
                // reward coincides with the reward collected before reaching the goal.
575                storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), reachableFromGoal,
576                                                        storm::utility::zero<typename SparseModelType::ValueType>());
577            }
578            if (objectiveRewards.hasStateActionRewards()) {
579                for (auto state : reachableFromGoal) {
                    // Zero out the action rewards of the whole row group belonging to `state`.
580                    std::fill_n(objectiveRewards.getStateActionRewardVector().begin() + data.model->getTransitionMatrix().getRowGroupIndices()[state],
581                                data.model->getTransitionMatrix().getRowGroupSize(state), storm::utility::zero<typename SparseModelType::ValueType>());
582                }
583            }
584            data.model->addRewardModel(rewardModelName, std::move(objectiveRewards));
585        } else if (formula.isReachabilityTimeFormula()) {
586            // build state reward vector that only gives reward for relevant states
587            std::vector<typename SparseModelType::ValueType> timeRewards(data.model->getNumberOfStates(),
588                                                                         storm::utility::zero<typename SparseModelType::ValueType>());
589            if (data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) {
                // NOTE(review): original source lines 590 and 592 were dropped during extraction;
                // they contained the helper call (and the Markovian-states argument) that fills
                // `timeRewards` for Markov automata -- restore from upstream sources.
591                    timeRewards,
593                        reachableFromInit,
594                    storm::utility::one<typename SparseModelType::ValueType>());
595            } else {
596                storm::utility::vector::setVectorValues(timeRewards, reachableFromInit, storm::utility::one<typename SparseModelType::ValueType>());
597            }
598            data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards)));
599        } else {
600            STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
601                            "The formula " << formula << " neither considers reachability probabilities nor reachability rewards "
602                                << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "")
603                                << ". This is not supported.");
604        }
605    } else {
606        STORM_LOG_INFO("Objective " << *data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property.");
607        if (formula.isReachabilityRewardFormula()) {
608            // TODO: this probably needs some better treatment regarding schedulers that do not reach the goal state almost surely
609            assert(optionalRewardModelName.is_initialized());
610            if (data.deadlockLabel) {
611                // We made some states absorbing and created a new deadlock state. To make sure that this deadlock state gets value zero, we add it to the set
612                // of goal states of the formula.
613                std::shared_ptr<storm::logic::Formula const> newSubSubformula =
614                    std::make_shared<storm::logic::AtomicLabelFormula const>(data.deadlockLabel.get());
615                std::shared_ptr<storm::logic::Formula const> newSubformula = std::make_shared<storm::logic::BinaryBooleanStateFormula const>(
616                    storm::logic::BinaryBooleanStateFormula::OperatorType::Or, formula.getSubformula().asSharedPointer(), newSubSubformula);
617                boost::optional<storm::logic::RewardAccumulation> newRewardAccumulation;
618                if (formula.hasRewardAccumulation()) {
619                    newRewardAccumulation = formula.getRewardAccumulation();
620                }
621                std::shared_ptr<storm::logic::Formula const> newFormula =
622                    std::make_shared<storm::logic::EventuallyFormula const>(newSubformula, formula.getContext(), newRewardAccumulation);
623                data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, optionalRewardModelName.get(), opInfo);
624            } else {
625                data.objectives.back()->formula =
626                    std::make_shared<storm::logic::RewardOperatorFormula>(formula.asSharedPointer(), optionalRewardModelName.get(), opInfo);
627            }
628        } else if (formula.isReachabilityTimeFormula()) {
629            // Reduce to reachability rewards so that time formulas do not have to be treated separately later.
630            std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
631            std::shared_ptr<storm::logic::Formula const> newSubformula = formula.getSubformula().asSharedPointer();
632            if (data.deadlockLabel) {
633                // We made some states absorbing and created a new deadlock state. To make sure that this deadlock state gets value zero, we add it to the set
634                // of goal states of the formula.
635                std::shared_ptr<storm::logic::Formula const> newSubSubformula =
636                    std::make_shared<storm::logic::AtomicLabelFormula const>(data.deadlockLabel.get());
637                newSubformula = std::make_shared<storm::logic::BinaryBooleanStateFormula const>(storm::logic::BinaryBooleanStateFormula::OperatorType::Or,
638                                                                                                formula.getSubformula().asSharedPointer(), newSubSubformula);
639            }
640            auto newFormula = std::make_shared<storm::logic::EventuallyFormula>(newSubformula, storm::logic::FormulaContext::Reward);
641            data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, rewardModelName, opInfo);
642            std::vector<typename SparseModelType::ValueType> timeRewards;
643            if (data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) {
644                timeRewards.assign(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>());
                // NOTE(review): original source lines 645 and 647 were dropped during extraction;
                // they contained the helper call (and the Markovian-states argument) that assigns
                // time reward one to Markovian states -- restore from upstream sources.
646                    timeRewards,
648                    storm::utility::one<typename SparseModelType::ValueType>());
649            } else {
650                timeRewards.assign(data.model->getNumberOfStates(), storm::utility::one<typename SparseModelType::ValueType>());
651            }
652            data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards)));
653        } else {
654            STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
655                            "The formula " << formula << " neither considers reachability probabilities nor reachability rewards "
656                                << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "")
657                                << ". This is not supported.");
658        }
659    }
    // Mark this objective so that finiteness of its rewards is checked later in preprocessing.
660    data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
661}
662
663template<typename SparseModelType>
664void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula,
666 PreprocessorData& data,
667 boost::optional<std::string> const& optionalRewardModelName) {
668 STORM_LOG_THROW(data.model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::InvalidPropertyException,
669 "Cumulative reward formulas are not supported for the given model type.");
670 std::string rewardModelName = optionalRewardModelName.get();
671 // Strip away potential RewardAccumulations in the formula itself but also in reward bounds
672 auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
673 if (filteredRewards.isDifferentFromUnfilteredModel()) {
674 std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
675 data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract()));
676 }
677
678 std::vector<storm::logic::TimeBoundReference> newTimeBoundReferences;
679 bool onlyRewardBounds = true;
680 for (uint64_t i = 0; i < formula.getDimension(); ++i) {
681 auto oldTbr = formula.getTimeBoundReference(i);
682 if (oldTbr.isRewardBound()) {
683 if (oldTbr.hasRewardAccumulation()) {
684 auto filteredBoundRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(oldTbr.getRewardName()),
685 oldTbr.getRewardAccumulation(), data.model->isDiscreteTimeModel());
686 if (filteredBoundRewards.isDifferentFromUnfilteredModel()) {
687 std::string freshRewardModelName =
688 data.rewardModelNamePrefix + std::to_string(data.objectives.size()) + std::string("_" + std::to_string(i));
689 data.model->addRewardModel(freshRewardModelName, std::move(filteredBoundRewards.extract()));
690 newTimeBoundReferences.emplace_back(freshRewardModelName);
691 } else {
692 // Strip away the reward accumulation
693 newTimeBoundReferences.emplace_back(oldTbr.getRewardName());
694 }
695 } else {
696 newTimeBoundReferences.push_back(oldTbr);
697 }
698 } else {
699 onlyRewardBounds = false;
700 newTimeBoundReferences.push_back(oldTbr);
701 }
702 }
703
704 auto newFormula = std::make_shared<storm::logic::CumulativeRewardFormula>(formula.getBounds(), newTimeBoundReferences);
705 data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(newFormula, rewardModelName, opInfo);
706
707 if (onlyRewardBounds) {
708 data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
709 }
710}
711
712template<typename SparseModelType>
713void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTotalRewardFormula(storm::logic::TotalRewardFormula const& formula,
714 storm::logic::OperatorInformation const& opInfo, PreprocessorData& data,
715 boost::optional<std::string> const& optionalRewardModelName) {
716 std::string rewardModelName = optionalRewardModelName.get();
717 auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
718 if (filteredRewards.isDifferentFromUnfilteredModel()) {
719 std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
720 data.model->addRewardModel(rewardModelName, filteredRewards.extract());
721 }
722 data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(formula.stripRewardAccumulation(), rewardModelName, opInfo);
723 data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
724}
725
726template<typename SparseModelType>
727void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageRewardFormula(storm::logic::LongRunAverageRewardFormula const& formula,
729 PreprocessorData& data,
730 boost::optional<std::string> const& optionalRewardModelName) {
731 std::string rewardModelName = optionalRewardModelName.get();
732 auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula);
733 if (filteredRewards.isDifferentFromUnfilteredModel()) {
734 std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
735 data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract()));
736 }
737 data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(formula.stripRewardAccumulation(), rewardModelName, opInfo);
738}
739
740template<typename SparseModelType>
741typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::buildResult(
742 SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data) {
743 ReturnType result(originalFormula, originalModel);
744 auto backwardTransitions = data.model->getBackwardTransitions();
745 result.preprocessedModel = data.model;
746 result.memoryIncorporationReverseData = data.memoryIncorporationReverseData;
747
748 for (auto& obj : data.objectives) {
749 result.objectives.push_back(std::move(*obj));
750 }
751 result.queryType = getQueryType(result.objectives);
752 result.maybeInfiniteRewardObjectives = std::move(data.finiteRewardCheckObjectives);
753
754 return result;
755}
756
757template<typename SparseModelType>
758typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType::QueryType SparseMultiObjectivePreprocessor<SparseModelType>::getQueryType(
759 std::vector<Objective<ValueType>> const& objectives) {
760 uint_fast64_t numOfObjectivesWithThreshold = 0;
761 for (auto& obj : objectives) {
762 if (obj.formula->hasBound()) {
763 ++numOfObjectivesWithThreshold;
764 }
765 }
766 if (numOfObjectivesWithThreshold == objectives.size()) {
767 return ReturnType::QueryType::Achievability;
768 } else if (numOfObjectivesWithThreshold + 1 == objectives.size()) {
769 // Note: We do not want to consider a Pareto query when the total number of objectives is one.
770 return ReturnType::QueryType::Quantitative;
771 } else if (numOfObjectivesWithThreshold == 0) {
772 return ReturnType::QueryType::Pareto;
773 } else {
774 STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException,
775 "Invalid Multi-objective query: The numer of qualitative objectives should be either 0 (Pareto query), 1 (quantitative query), or "
776 "#objectives (achievability query).");
777 }
778}
779
// Explicit template instantiations of the preprocessor for all supported sparse model types
// (MDPs and Markov automata over double and exact rational arithmetic).
780template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>;
781template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<double>>;
782
783template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<storm::RationalNumber>>;
784template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>;
785} // namespace preprocessing
786} // namespace multiobjective
787} // namespace modelchecker
788} // namespace storm
ModelCheckerEnvironment & modelchecker()
MultiObjectiveModelCheckerEnvironment & multi()
storm::storage::SchedulerClass const & getSchedulerRestriction() const
Formula const & getRightSubformula() const
Formula const & getLeftSubformula() const
TimeBoundReference const & getTimeBoundReference(unsigned i=0) const
bool isLowerBoundStrict(unsigned i=0) const
storm::expressions::Expression const & getUpperBound(unsigned i=0) const
bool isUpperBoundStrict(unsigned i=0) const
TimeBoundReference const & getTimeBoundReference() const
std::vector< TimeBound > const & getBounds() const
virtual bool isReachabilityRewardFormula() const override
FormulaContext const & getContext() const
virtual bool isReachabilityProbabilityFormula() const override
RewardAccumulation const & getRewardAccumulation() const
virtual bool isReachabilityTimeFormula() const override
TotalRewardFormula & asTotalRewardFormula()
Definition Formula.cpp:429
RewardOperatorFormula & asRewardOperatorFormula()
Definition Formula.cpp:461
virtual bool isGloballyFormula() const
Definition Formula.cpp:96
UntilFormula & asUntilFormula()
Definition Formula.cpp:317
BoundedUntilFormula & asBoundedUntilFormula()
Definition Formula.cpp:325
static std::shared_ptr< Formula const > getTrueFormula()
Definition Formula.cpp:205
LongRunAverageOperatorFormula & asLongRunAverageOperatorFormula()
Definition Formula.cpp:405
LongRunAverageRewardFormula & asLongRunAverageRewardFormula()
Definition Formula.cpp:445
virtual bool isProbabilityOperatorFormula() const
Definition Formula.cpp:172
virtual bool isCumulativeRewardFormula() const
Definition Formula.cpp:144
ProbabilityOperatorFormula & asProbabilityOperatorFormula()
Definition Formula.cpp:453
GloballyFormula & asGloballyFormula()
Definition Formula.cpp:373
virtual bool isUntilFormula() const
Definition Formula.cpp:80
virtual bool isRewardOperatorFormula() const
Definition Formula.cpp:176
virtual bool isLongRunAverageRewardFormula() const
Definition Formula.cpp:156
EventuallyFormula & asEventuallyFormula()
Definition Formula.cpp:333
TimeOperatorFormula & asTimeOperatorFormula()
Definition Formula.cpp:413
virtual bool isLongRunAverageOperatorFormula() const
Definition Formula.cpp:136
virtual bool isBoundedUntilFormula() const
Definition Formula.cpp:84
virtual bool isEventuallyFormula() const
Definition Formula.cpp:88
virtual bool isTimeOperatorFormula() const
Definition Formula.cpp:140
virtual bool isTotalRewardFormula() const
Definition Formula.cpp:160
CumulativeRewardFormula & asCumulativeRewardFormula()
Definition Formula.cpp:421
std::shared_ptr< Formula const > asSharedPointer()
Definition Formula.cpp:548
std::shared_ptr< LongRunAverageRewardFormula const > stripRewardAccumulation() const
std::vector< std::shared_ptr< Formula const > > const & getSubformulas() const
Bound const & getBound() const
storm::solver::OptimizationDirection const & getOptimalityType() const
std::string const & getRewardModelName() const
Retrieves the name of the reward model this property refers to (if any).
bool hasRewardModelName() const
Retrieves whether the reward model refers to a specific reward model.
std::shared_ptr< TotalRewardFormula const > stripRewardAccumulation() const
Formula const & getSubformula() const
Formula const & getSubformula() const
static ReturnType preprocess(Environment const &env, SparseModelType const &originalModel, storm::logic::MultiObjectiveFormula const &originalFormula, bool produceScheduler)
Preprocesses the given model w.r.t.
This class represents a Markov automaton.
storm::storage::BitVector const & getMarkovianStates() const
Retrieves the set of Markovian states of the model.
A bit vector that is internally represented as a vector of 64-bit values.
Definition BitVector.h:16
void complement()
Negates all bits in the bit vector.
bool isSubsetOf(BitVector const &other) const
Checks whether all bits that are set in the current bit vector are also set in the given bit vector.
size_t size() const
Retrieves the number of bits this bit vector can store.
bool get(uint64_t index) const
Retrieves the truth value of the bit at the given index and performs a bound check.
A class that holds a possibly non-square matrix in the compressed row storage format.
static std::shared_ptr< SparseModelType > incorporateFullMemory(SparseModelType const &model, uint64_t memoryStates)
Incorporates a memory structure where the nondeterminism of the model decides which successor state t...
static std::pair< std::shared_ptr< SparseModelType >, storm::storage::SparseModelMemoryProductReverseData > incorporateGoalMemoryWithReverseData(SparseModelType const &model, std::vector< std::shared_ptr< storm::logic::Formula const > > const &formulas)
Like incorporateGoalMemory, but also returns data necessary to translate results (in particular sched...
static std::shared_ptr< SparseModelType > incorporateGoalMemory(SparseModelType const &model, std::vector< std::shared_ptr< storm::logic::Formula const > > const &formulas)
Incorporates memory that stores whether a 'goal' state has already been reached.
static std::shared_ptr< SparseModelType > incorporateCountingMemory(SparseModelType const &model, uint64_t memoryStates)
Incorporates a memory structure where the nondeterminism of the model can increment a counter.
#define STORM_LOG_INFO(message)
Definition logging.h:24
#define STORM_LOG_WARN_COND(cond, message)
Definition macros.h:38
#define STORM_LOG_THROW(cond, exception, message)
Definition macros.h:30
bool isLowerBound(ComparisonType t)
storm::storage::BitVector getOnlyReachableViaPhi(SparseModelType const &model, storm::storage::BitVector const &phi)
storm::logic::OperatorInformation getOperatorInformation(storm::logic::OperatorFormula const &formula, bool considerComplementaryEvent)
OptimizationDirection constexpr invert(OptimizationDirection d)
SubsystemBuilderReturnType< ValueType, RewardModelType > buildSubsystem(storm::models::sparse::Model< ValueType, RewardModelType > const &originalModel, storm::storage::BitVector const &subsystemStates, storm::storage::BitVector const &subsystemActions, bool keepUnreachableStates, SubsystemBuilderOptions options)
storm::storage::BitVector getReachableStates(storm::storage::SparseMatrix< T > const &transitionMatrix, storm::storage::BitVector const &initialStates, storm::storage::BitVector const &constraintStates, storm::storage::BitVector const &targetStates, bool useStepBound, uint_fast64_t maximalSteps, boost::optional< storm::storage::BitVector > const &choiceFilter)
Performs a forward depth-first search through the underlying graph structure to identify the states t...
Definition graph.cpp:47
storm::storage::BitVector performProb1A(storm::models::sparse::NondeterministicModel< T, RM > const &model, storm::storage::SparseMatrix< T > const &backwardTransitions, storm::storage::BitVector const &phiStates, storm::storage::BitVector const &psiStates)
Computes the sets of states that have probability 1 of satisfying phi until psi under all possible re...
Definition graph.cpp:987
storm::storage::BitVector performProb0A(storm::storage::SparseMatrix< T > const &backwardTransitions, storm::storage::BitVector const &phiStates, storm::storage::BitVector const &psiStates)
Definition graph.cpp:739
void setVectorValues(std::vector< T > &vector, storm::storage::BitVector const &positions, std::vector< T > const &values)
Sets the provided values at the provided positions in the given vector.
Definition vector.h:82
FilteredRewardModel< RewardModelType > createFilteredRewardModel(RewardModelType const &baseRewardModel, storm::logic::RewardAccumulation const &acc, bool isDiscreteTimeModel)
bool isZero(ValueType const &a)
Definition constants.cpp:41
LabParser.cpp.
boost::optional< Bound > bound
boost::optional< storm::solver::OptimizationDirection > optimalityType