Storm 1.11.1.1
A Modern Probabilistic Model Checker
ContinuousToDiscreteTimeModelTransformer.cpp
#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h"

#include "storm/exceptions/InvalidArgumentException.h"
#include "storm/logic/ExpectedTimeToExpectedRewardVisitor.h"
#include "storm/logic/Formulas.h"
#include "storm/logic/FragmentSpecification.h"
#include "storm/utility/macros.h"
#include "storm/utility/vector.h"

namespace storm {
namespace transformer {

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::Ctmc<ValueType, RewardModelType> const& ctmc, boost::optional<std::string> const& timeRewardModelName) {
    // Init the dtmc components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(ctmc.getTransitionMatrix(), ctmc.getStateLabeling(),
                                                                                       ctmc.getRewardModels());
    dtmcComponents.choiceLabeling = ctmc.getOptionalChoiceLabeling();
    dtmcComponents.stateValuations = ctmc.getOptionalStateValuations();
    dtmcComponents.choiceOrigins = ctmc.getOptionalChoiceOrigins();

    // Turn the rates into probabilities by dividing each row of the transition matrix by the corresponding exit rate
    std::vector<ValueType> const& exitRates = ctmc.getExitRateVector();
    dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);

    // Transform the reward models
    for (auto& rewardModel : dtmcComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
        }
    }

    if (timeRewardModelName) {
        // Compute the auxiliary time rewards as the pointwise inverted exit rates
        std::vector<ValueType> timeRewardVector;
        timeRewardVector.reserve(exitRates.size());
        for (auto const& r : exitRates) {
            timeRewardVector.push_back(storm::utility::one<ValueType>() / r);
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}

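// Note on the embedding above: a CTMC with rate matrix R and exit rates
// E(s) = sum_{s'} R(s, s') induces the DTMC with transition probabilities
// P(s, s') = R(s, s') / E(s). A state reward of 1/E(s) equals the expected
// sojourn time in s, which is exactly what the optional auxiliary time
// reward model records.
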
template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::Ctmc<ValueType, RewardModelType>&& ctmc, boost::optional<std::string> const& timeRewardModelName) {
    // Init the dtmc components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(std::move(ctmc.getTransitionMatrix()),
                                                                                       std::move(ctmc.getStateLabeling()), std::move(ctmc.getRewardModels()));
    dtmcComponents.choiceLabeling = std::move(ctmc.getOptionalChoiceLabeling());
    dtmcComponents.stateValuations = std::move(ctmc.getOptionalStateValuations());
    dtmcComponents.choiceOrigins = std::move(ctmc.getOptionalChoiceOrigins());

    // Turn the rates into probabilities by dividing each row of the transition matrix by the corresponding exit rate
    std::vector<ValueType>& exitRates = ctmc.getExitRateVector();
    dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);

    // Transform the reward models
    for (auto& rewardModel : dtmcComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector in place
        storm::utility::vector::applyPointwise<ValueType, ValueType>(exitRates, exitRates,
                                                                     [&](ValueType const& r) -> ValueType { return storm::utility::one<ValueType>() / r; });
        RewardModelType timeRewards(std::move(exitRates));
        auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }
    // Note: exitRates might be invalidated at this point.

    return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}
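
// Note on the overload above: since the CTMC is consumed, the exit rate
// vector can be inverted in place and its buffer reused as the time reward
// vector, saving an allocation compared to the const& overload; this is why
// exitRates may be invalidated afterwards.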

template<typename ValueType, typename RewardModelType>
bool ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::preservesFormula(storm::logic::Formula const& formula) {
    // Build the fragment of formulas that the transformation preserves
    storm::logic::FragmentSpecification fragment = storm::logic::propositional();
    fragment.setProbabilityOperatorsAllowed(true);
    fragment.setGloballyFormulasAllowed(true);
    fragment.setReachabilityProbabilityFormulasAllowed(true);
    fragment.setNextFormulasAllowed(true);
    fragment.setUntilFormulasAllowed(true);
    fragment.setRewardOperatorsAllowed(true);
    fragment.setReachabilityRewardFormulasAllowed(true);
    fragment.setTotalRewardFormulasAllowed(true);
    fragment.setMultiObjectiveFormulasAllowed(true);
    return formula.isInFragment(fragment);
}

template<typename ValueType, typename RewardModelType>
std::vector<std::shared_ptr<storm::logic::Formula const>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::checkAndTransformFormulas(
    std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, std::string const& timeRewardName) {
    std::vector<std::shared_ptr<storm::logic::Formula const>> result;
    storm::logic::ExpectedTimeToExpectedRewardVisitor v(timeRewardName);
    for (auto const& f : formulas) {
        // Translate expected time formulas
        auto newF = v.substitute(*f);
        if (preservesFormula(*newF)) {
            result.push_back(newF);
        } else {
            STORM_LOG_INFO("Continuous to discrete time transformation does not preserve formula " << *f);
        }
    }
    return result;
}
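
// Note on checkAndTransformFormulas above: expected-time formulas are
// rewritten into expected-reward formulas over the auxiliary time reward
// model, and formulas outside the preserved fragment are dropped with a
// log message.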

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType> const& ma, boost::optional<std::string> const& timeRewardModelName) {
    STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transforming an MA into its underlying MDP is only possible for closed MAs.");

    // Init the mdp components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(ma.getTransitionMatrix(), ma.getStateLabeling(), ma.getRewardModels());
    mdpComponents.choiceLabeling = ma.getOptionalChoiceLabeling();
    mdpComponents.stateValuations = ma.getOptionalStateValuations();
    mdpComponents.choiceOrigins = ma.getOptionalChoiceOrigins();

    // Markov automata already store the probability matrix

    // Transform the reward models
    std::vector<ValueType> const& exitRates = ma.getExitRates();
    for (auto& rewardModel : mdpComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            auto& stateRewards = rewardModel.second.getStateRewardVector();
            for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) {
                if (ma.getMarkovianStates().get(state)) {
                    stateRewards[state] /= exitRates[state];
                } else {
                    stateRewards[state] = storm::utility::zero<ValueType>();
                }
            }
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector. Avoid division by zero at probabilistic states
        std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
        for (auto state : ma.getMarkovianStates()) {
            timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}
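
// Note on the transformation above: in a closed MA, time advances only in
// Markovian states, so the auxiliary time reward is 1/E(s) there, while
// probabilistic states are instantaneous and receive reward zero.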

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> ContinuousToDiscreteTimeModelTransformer<ValueType, RewardModelType>::transform(
    storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType>&& ma, boost::optional<std::string> const& timeRewardModelName) {
    STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transforming an MA into its underlying MDP is only possible for closed MAs.");
    std::vector<ValueType>& exitRates = ma.getExitRates();

    // Init the mdp components
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(std::move(ma.getTransitionMatrix()), std::move(ma.getStateLabeling()),
                                                                                      std::move(ma.getRewardModels()));
    mdpComponents.choiceLabeling = std::move(ma.getOptionalChoiceLabeling());
    mdpComponents.stateValuations = std::move(ma.getOptionalStateValuations());
    mdpComponents.choiceOrigins = std::move(ma.getOptionalChoiceOrigins());

    // Markov automata already store the probability matrix

    // Transform the reward models
    for (auto& rewardModel : mdpComponents.rewardModels) {
        if (rewardModel.second.hasStateRewards()) {
            auto& stateRewards = rewardModel.second.getStateRewardVector();
            for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) {
                if (ma.getMarkovianStates().get(state)) {
                    stateRewards[state] /= exitRates[state];
                } else {
                    stateRewards[state] = storm::utility::zero<ValueType>();
                }
            }
        }
    }

    if (timeRewardModelName) {
        // Invert the exit rate vector. Avoid division by zero at probabilistic states
        std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
        for (auto state : ma.getMarkovianStates()) {
            timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
        }
        RewardModelType timeRewards(std::move(timeRewardVector));
        auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
        STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException,
                        "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
    }

    return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}

} // namespace transformer
} // namespace storm
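
A minimal usage sketch (separate from the listing above): the snippet below shows how the transformer might be invoked on a sparse CTMC. It assumes that the RewardModelType template parameter defaults to Storm's standard sparse reward model, as declared in the transformer's header; the reward model name "time" and the function embed are arbitrary choices for illustration.

#include <memory>
#include <string>
#include <vector>

#include "storm/logic/Formula.h"
#include "storm/models/sparse/Ctmc.h"
#include "storm/models/sparse/Dtmc.h"
#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h"

// Embed a CTMC into its underlying DTMC, adding an auxiliary "time" reward
// model that assigns the expected sojourn time 1/E(s) to each state. The
// given formulas are rewritten so that expected-time queries become
// expected-reward queries on the discrete-time model.
std::shared_ptr<storm::models::sparse::Dtmc<double>> embed(
    storm::models::sparse::Ctmc<double> const& ctmc,
    std::vector<std::shared_ptr<storm::logic::Formula const>>& formulas) {
    using Transformer = storm::transformer::ContinuousToDiscreteTimeModelTransformer<double>;
    formulas = Transformer::checkAndTransformFormulas(formulas, "time");
    return Transformer::transform(ctmc, std::string("time"));
}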