ContinuousToDiscreteTimeModelTransformer.cpp
#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h"

#include <unordered_map>

#include "storm/exceptions/InvalidArgumentException.h"
#include "storm/logic/ExpectedTimeToExpectedRewardVisitor.h"
#include "storm/logic/Formulas.h"
#include "storm/logic/FragmentSpecification.h"
#include "storm/models/sparse/Ctmc.h"
#include "storm/models/sparse/Dtmc.h"
#include "storm/models/sparse/MarkovAutomaton.h"
#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/sparse/ModelComponents.h"
#include "storm/utility/macros.h"
#include "storm/utility/vector.h"

namespace storm {
namespace transformer {

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::Ctmc<ValueType, RewardModelType> const& ctmc, boost::optional<std::string> const& timeRewardModelName) {
    // Init the dtmc components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(ctmc.getTransitionMatrix(), ctmc.getStateLabeling(),
                                                                                       ctmc.getRewardModels());
    dtmcComponents.choiceLabeling = ctmc.getOptionalChoiceLabeling();
    dtmcComponents.stateValuations = ctmc.getOptionalStateValuations();
    dtmcComponents.choiceOrigins = ctmc.getOptionalChoiceOrigins();

    // Turn the rates into probabilities by dividing each row of the transition matrix by the exit rate
    std::vector<ValueType> const& exitRates = ctmc.getExitRateVector();
    dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);
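    // This yields the embedded DTMC of the CTMC: P(s, s') = R(s, s') / E(s), where R is the rate
    // matrix and E(s) is the exit rate of state s.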

    // Transform the reward models
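    // CTMC state rewards are reward *rates*; dividing by the exit rate turns them into the expected
    // reward accumulated during one sojourn in the state.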
    for (auto& rewardModel : dtmcComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
        }
    }

    if (timeRewardModelName) {
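        // The expected sojourn time in state s is 1/E(s); storing these values as a state reward
        // model allows expected-time queries on the CTMC to be answered as expected-reward queries
        // on the DTMC.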
        // Build the time reward vector from the inverted exit rates
        std::vector<ValueType> timeRewardVector;
        timeRewardVector.reserve(exitRates.size());
        for (auto const& r : exitRates) {
            timeRewardVector.push_back(storm::utility::one<ValueType>() / r);
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}

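// Minimal usage sketch (illustrative only; assumes a sparse CTMC 'ctmc' built elsewhere):
//     auto dtmc = storm::transformer::ContinuousToDiscreteTimeModelTransformer<double>::transform(ctmc, std::string("time"));
// The returned DTMC is the embedded chain and carries an auxiliary reward model "time" with value 1/E(s) per state.
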
template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::Ctmc<ValueType, RewardModelType>&& ctmc, boost::optional<std::string> const& timeRewardModelName) {
    // Init the dtmc components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(std::move(ctmc.getTransitionMatrix()),
                                                                                       std::move(ctmc.getStateLabeling()), std::move(ctmc.getRewardModels()));
    dtmcComponents.choiceLabeling = std::move(ctmc.getOptionalChoiceLabeling());
    dtmcComponents.stateValuations = std::move(ctmc.getOptionalStateValuations());
    dtmcComponents.choiceOrigins = std::move(ctmc.getOptionalChoiceOrigins());

    // Turn the rates into probabilities by dividing each row of the transition matrix by the exit rate
    std::vector<ValueType>& exitRates = ctmc.getExitRateVector();
    dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);

    // Transform the reward models
    for (auto& rewardModel : dtmcComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector in place
        storm::utility::vector::applyPointwise<ValueType, ValueType>(exitRates, exitRates,
                                                                     [&](ValueType const& r) -> ValueType { return storm::utility::one<ValueType>() / r; });
        RewardModelType timeRewards(std::move(exitRates));
        auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }
    // Note: exitRates might be invalidated at this point.

    return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}
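
// The rvalue overload above reuses the CTMC's components (matrix, labelings, reward models) and
// recycles the exit rate vector as the time reward vector, avoiding copies of the large data structures.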

template<typename ValueType, typename RewardModelType>
bool ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::preservesFormula(storm::logic::Formula const& formula) {
    storm::logic::FragmentSpecification fragment = storm::logic::propositional();
    fragment.setProbabilityOperatorsAllowed(true);
    fragment.setGloballyFormulasAllowed(true);
    fragment.setReachabilityProbabilityFormulasAllowed(true);
    fragment.setNextFormulasAllowed(true);
    fragment.setUntilFormulasAllowed(true);
    fragment.setRewardOperatorsAllowed(true);
    fragment.setReachabilityRewardFormulasAllowed(true);
    fragment.setTotalRewardFormulasAllowed(true);
    fragment.setMultiObjectiveFormulasAllowed(true);
    return formula.isInFragment(fragment);
}

template<typename ValueType, typename RewardModelType>
std::vector<std::shared_ptr<storm::logic::Formula const>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::checkAndTransformFormulas(
    std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, std::string const& timeRewardName) {
    std::vector<std::shared_ptr<storm::logic::Formula const>> result;
    storm::logic::ExpectedTimeToExpectedRewardVisitor v(timeRewardName);
    for (auto const& f : formulas) {
        // Translate expected time formulas
        auto newF = v.substitute(*f);
        if (preservesFormula(*newF)) {
            result.push_back(newF);
        } else {
            STORM_LOG_INFO("Continuous to discrete time transformation does not preserve formula " << *f);
        }
    }
    return result;
}
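
// For illustration (PRISM-style syntax, not taken from this file): with timeRewardName "time",
// an expected-time query such as
//     Tmin=? [ F "goal" ]
// is rewritten to the expected-reward query
//     R{"time"}min=? [ F "goal" ]
// which can then be checked on the discrete-time model.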

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType> const& ma, boost::optional<std::string> const& timeRewardModelName) {
    STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs");
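    // (In a closed MA, the maximal-progress assumption has already been applied: each state is
    // either Markovian with exit rate E(s) or probabilistic and left in zero time.)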

    // Init the mdp components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(ma.getTransitionMatrix(), ma.getStateLabeling(), ma.getRewardModels());
    mdpComponents.choiceLabeling = ma.getOptionalChoiceLabeling();
    mdpComponents.stateValuations = ma.getOptionalStateValuations();
    mdpComponents.choiceOrigins = ma.getOptionalChoiceOrigins();

    // Markov automata already store the probability matrix, so no additional normalization of the
    // transition matrix is needed here

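    // Reward rates only accumulate at Markovian states; probabilistic states are left
    // instantaneously, so their state rewards are set to zero below.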
    // Transform the reward models
    std::vector<ValueType> const& exitRates = ma.getExitRates();
    for (auto& rewardModel : mdpComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            auto& stateRewards = rewardModel.second.getStateRewardVector();
            for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) {
                if (ma.getMarkovianStates().get(state)) {
                    stateRewards[state] /= exitRates[state];
                } else {
                    stateRewards[state] = storm::utility::zero<ValueType>();
                }
            }
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector. Avoid division by zero at probabilistic states
        std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
        for (auto state : ma.getMarkovianStates()) {
            timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}
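
// Usage sketch (illustrative; 'ma' is a closed sparse Markov automaton built elsewhere):
//     auto mdp = storm::transformer::ContinuousToDiscreteTimeModelTransformer<double>::transform(ma, std::string("time"));
// The resulting MDP carries a "time" reward of 1/E(s) at Markovian states and 0 at probabilistic states.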

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType>&& ma, boost::optional<std::string> const& timeRewardModelName) {
    STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs");
    std::vector<ValueType>& exitRates = ma.getExitRates();

    // Init the mdp components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(std::move(ma.getTransitionMatrix()), std::move(ma.getStateLabeling()),
                                                                                      std::move(ma.getRewardModels()));
    mdpComponents.choiceLabeling = std::move(ma.getOptionalChoiceLabeling());
    mdpComponents.stateValuations = std::move(ma.getOptionalStateValuations());
    mdpComponents.choiceOrigins = std::move(ma.getOptionalChoiceOrigins());

    // Markov automata already store the probability matrix, so no additional normalization of the
    // transition matrix is needed here

    // Transform the reward models
    for (auto& rewardModel : mdpComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            auto& stateRewards = rewardModel.second.getStateRewardVector();
            for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) {
                if (ma.getMarkovianStates().get(state)) {
                    stateRewards[state] /= exitRates[state];
                } else {
                    stateRewards[state] = storm::utility::zero<ValueType>();
                }
            }
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector. Avoid division by zero at probabilistic states
        std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
        for (auto state : ma.getMarkovianStates()) {
            timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}

}  // namespace transformer
}  // namespace storm