// SparseNondeterministicInfiniteHorizonHelper.cpp
namespace storm {
namespace modelchecker {
namespace helper {

template<typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix)
    : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix) {
    // Intentionally left empty.
}

template<typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(
    storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates)
    : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix, markovianStates, exitRates) {
    // Intentionally left empty.
}

template<typename ValueType>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
    STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
    STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(),
                     "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
    return this->_producedOptimalChoices.get();
}

template<typename ValueType>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
    STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
    STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(),
                     "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
    return this->_producedOptimalChoices.get();
}

template<typename ValueType>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
    auto const& optimalChoices = getProducedOptimalChoices();
    storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
    for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
        scheduler.setChoice(optimalChoices[state], state);
    }
    return scheduler;
}

template<typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
    if (this->_longRunComponentDecomposition == nullptr) {
        // The decomposition has not been provided or computed, yet.
        this->createBackwardTransitions();
        this->_computedLongRunComponentDecomposition =
            std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
        this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
    }
}

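// Computes the long-run average value of a single maximal end component (MEC).
// Trivial (single-state) MECs are handled directly; nontrivial MECs are solved either by value iteration or by linear programming,
// depending on the LRA method configured in the environment.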
template<typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter,
                                                                                          ValueGetter const& actionRewardsGetter,
                                                                                          storm::storage::MaximalEndComponent const& component) {
    // For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)

    // Allocate memory for the nondeterministic choices.
    if (this->isProduceSchedulerSet()) {
        if (!this->_producedOptimalChoices.is_initialized()) {
            this->_producedOptimalChoices.emplace();
        }
        this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
    }

    auto trivialResult = this->computeLraForTrivialMec(env, stateRewardsGetter, actionRewardsGetter, component);
    if (trivialResult.first) {
        return trivialResult.second;
    }

    // Solve nontrivial MEC with the method specified in the settings
    storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
    if (env.solver().isForceExact() && env.solver().lra().isNondetLraMethodSetFromDefault() &&
        method != storm::solver::LraMethod::LinearProgramming) {
        STORM_LOG_INFO(
            "Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly "
            "specify a different LRA method.");
        method = storm::solver::LraMethod::LinearProgramming;
    } else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
        STORM_LOG_INFO(
            "Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly "
            "specify a different LRA method.");
        method = storm::solver::LraMethod::ValueIteration;
    }
    STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration,
                         "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
    if (method == storm::solver::LraMethod::LinearProgramming) {
        return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, component);
    } else if (method == storm::solver::LraMethod::ValueIteration) {
        return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, component);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
    }
}

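// Checks whether the given MEC is trivial (i.e., consists of a single state) and, if so, computes its LRA value directly.
// Returns a pair (isTrivial, value); the value is only meaningful if isTrivial is true.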
template<typename ValueType>
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialMec(
    Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter,
    storm::storage::MaximalEndComponent const& component) {
    // If the component only consists of a single state, we compute the LRA value directly
    if (component.size() == 1) {
        auto const& element = *component.begin();
        uint64_t state = internal::getComponentElementState(element);
        auto choiceIt = internal::getComponentElementChoicesBegin(element);
        if (!this->isContinuousTime()) {
            // This is an MDP.
            // Find the choice with the highest/lowest reward
            ValueType bestValue = actionRewardsGetter(*choiceIt);
            uint64_t bestChoice = *choiceIt;
            for (++choiceIt; choiceIt != internal::getComponentElementChoicesEnd(element); ++choiceIt) {
                ValueType currentValue = actionRewardsGetter(*choiceIt);
                if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
                    bestValue = std::move(currentValue);
                    bestChoice = *choiceIt;
                }
            }
            if (this->isProduceSchedulerSet()) {
                this->_producedOptimalChoices.get()[state] = bestChoice - this->_transitionMatrix.getRowGroupIndices()[state];
            }
            bestValue += stateRewardsGetter(state);
            return {true, bestValue};
        } else {
            // In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one
            // possible choice.
            STORM_LOG_ASSERT(this->_markovianStates != nullptr,
                             "Nondeterministic continuous time model without Markovian states... Is this not a Markov Automaton?");
            STORM_LOG_THROW(this->_markovianStates->get(state), storm::exceptions::InvalidOperationException,
                            "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
            STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has nondeterministic behavior.");
            if (this->isProduceSchedulerSet()) {
                this->_producedOptimalChoices.get()[state] = 0;
            }
            ValueType result = stateRewardsGetter(state) +
                               (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
            return {true, result};
        }
    }
    return {false, storm::utility::zero<ValueType>()};
}

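// Computes the LRA value of the given (nontrivial) MEC via value iteration.
// The aperiodicity factor configured in the environment is passed on to the LraViHelper, which performs the actual iterations.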
template<typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, ValueGetter const& stateRewardsGetter,
                                                                                     ValueGetter const& actionRewardsGetter,
                                                                                     storm::storage::MaximalEndComponent const& mec) {
    // Collect some parameters of the computation
    ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
    std::vector<uint64_t>* optimalChoices = nullptr;
    if (this->isProduceSchedulerSet()) {
        optimalChoices = &this->_producedOptimalChoices.get();
    }

    // Now create a helper and perform the algorithm
    if (this->isContinuousTime()) {
        // We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
        storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent,
                                                           storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs>
            viHelper(mec, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
        return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates, &this->getOptimizationDirection(),
                                              optimalChoices);
    } else {
        // We assume an MDP (with nondeterministic timed states and no instant states)
        storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent,
                                                           storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs>
            viHelper(mec, this->_transitionMatrix, aperiodicFactor);
        return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
    }
}

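// Computes the LRA value of the given (nontrivial) MEC by solving a linear program (cf. Guck et al., ATVA'14).
// Note that scheduler extraction is not supported with this method.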
template<typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter,
                                                                                     ValueGetter const& actionRewardsGetter,
                                                                                     storm::storage::MaximalEndComponent const& mec) {
    // Create an LP solver
    auto solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");

    // Now build the LP formulation as described in:
    // Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
    solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
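    // The LP has one variable x_s per MEC state plus a single gain variable k. The objective direction is inverted on purpose:
    // for a maximizing query we minimize k subject to x_s >= ... (see the constraints below), so the optimal k equals the largest
    // long-run average value achievable in the MEC; for a minimizing query the roles are swapped accordingly.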

    // Create variables
    // TODO: Investigate whether we can easily make the variables bounded
    std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
    for (auto const& stateChoicesPair : mec) {
        std::string variableName = "x" + std::to_string(stateChoicesPair.first);
        stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
    }
    storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
    solver->update();

    // Add constraints.
    for (auto const& stateChoicesPair : mec) {
        uint_fast64_t state = stateChoicesPair.first;
        bool stateIsMarkovian = this->_markovianStates && this->_markovianStates->get(state);

        // Now create a suitable constraint for each choice
        // x_s {≤, ≥} -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
        for (auto choice : stateChoicesPair.second) {
            std::vector<storm::expressions::Expression> summands;
            auto matrixRow = this->_transitionMatrix.getRow(choice);
            summands.reserve(matrixRow.getNumberOfEntries() + 2);
            // add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
            if (stateIsMarkovian) {
                summands.push_back(-(k / solver->getManager().rational((*this->_exitRates)[state])));
            } else if (!this->isContinuousTime()) {
                summands.push_back(-k);
            }
            // add sum_s' P(s,act,s') * x_s'
            for (auto const& element : matrixRow) {
                summands.push_back(stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue()));
            }
            // add value for state and choice
            ValueType value;
            if (stateIsMarkovian) {
                // divide state reward by the exit rate
                value = stateRewardsGetter(state) / (*this->_exitRates)[state] + actionRewardsGetter(choice);
            } else if (!this->isContinuousTime()) {
                // in discrete time models no scaling is needed
                value = stateRewardsGetter(state) + actionRewardsGetter(choice);
            } else {
                // state is a probabilistic state of a Markov automaton. The state reward is not collected
                value = actionRewardsGetter(choice);
            }
            summands.push_back(solver->getConstant(value));
            storm::expressions::Expression constraint;
            if (this->minimize()) {
                constraint = stateToVariableMap.at(state) <= storm::expressions::sum(summands);
            } else {
                constraint = stateToVariableMap.at(state) >= storm::expressions::sum(summands);
            }
            solver->addConstraint("s" + std::to_string(state) + "," + std::to_string(choice), constraint);
        }
    }

    solver->optimize();
    STORM_LOG_THROW(!this->isProduceSchedulerSet(), storm::exceptions::NotImplementedException,
                    "Scheduler extraction is not yet implemented for LP based LRA method.");
    return solver->getContinuousValue(k);
}

/*!
 * Auxiliary function that adds the entries of the SSP matrix for a single choice (i.e., a single row of the SSP matrix).
 * Transitions into a component are redirected to the corresponding auxiliary component state.
 */
template<typename ValueType>
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix,
                        std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfNonComponentStates, uint64_t const& currentSspChoice,
                        storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
    // As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
    std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;

    for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
        if (!storm::utility::isZero(transition.getValue())) {
            auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
            // Since the auxiliary component states are appended at the end of the matrix, we can use this check to
            // decide whether the transition leads to a component state or not
            if (sspTransitionTarget < numberOfNonComponentStates) {
                // If the target state is not contained in a component, we can copy over the entry.
                sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
            } else {
                // If the target state is contained in component i, we need to add the probability to the corresponding field in the vector
                // so that we are able to write the cumulative probability to the component into the matrix later.
                auto insertionRes = auxiliaryStateToProbabilityMap.emplace(sspTransitionTarget, transition.getValue());
                if (!insertionRes.second) {
                    // sspTransitionTarget already existed in the map, i.e., there already was a transition to that component.
                    // Hence, we add up the probabilities.
                    insertionRes.first->second += transition.getValue();
                }
            }
        }
    }

    // Now insert all (cumulative) probability values that target a component.
    for (auto const& componentToProbEntry : auxiliaryStateToProbabilityMap) {
        sspMatrixBuilder.addNextValue(currentSspChoice, componentToProbEntry.first, componentToProbEntry.second);
    }
}

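// Builds the transition matrix and right-hand side of the SSP (stochastic shortest path) problem: non-component states keep their
// (reindexed) choices, and each component is collapsed into one auxiliary state that offers all choices leaving the component plus
// one extra choice that "stays" and yields the component's LRA value.
// For illustration: in a model with 5 states where states {2, 3} form the only MEC, the SSP has 4 states; the non-component states
// 0, 1, 4 are mapped to SSP states 0, 1, 2 and the MEC is represented by the auxiliary SSP state 3.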
template<typename ValueType>
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(
    std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent,
    uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap) {
    auto const& choiceIndices = this->_transitionMatrix.getRowGroupIndices();

    std::vector<ValueType> rhs;
    uint64_t numberOfSspStates = numberOfNonComponentStates + this->_longRunComponentDecomposition->size();
    storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, true, true, numberOfSspStates);
    // If the source state of a transition is not contained in any component, we copy its choices (and perform the necessary modifications).
    uint64_t currentSspChoice = 0;
    for (auto nonComponentState : statesNotInComponent) {
        sspMatrixBuilder.newRowGroup(currentSspChoice);
        for (uint64_t choice = choiceIndices[nonComponentState]; choice < choiceIndices[nonComponentState + 1]; ++choice, ++currentSspChoice) {
            rhs.push_back(storm::utility::zero<ValueType>());
            addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
        }
    }
    // Now we construct the choices for the auxiliary states which reflect former component states.
    for (uint64_t componentIndex = 0; componentIndex < this->_longRunComponentDecomposition->size(); ++componentIndex) {
        auto const& component = (*this->_longRunComponentDecomposition)[componentIndex];
        sspMatrixBuilder.newRowGroup(currentSspChoice);
        // For nondeterministic models it might still be that we leave the component again. This needs to be reflected in the SSP
        // by adding the "exiting" choices of the MEC to the auxiliary states.
        for (auto const& element : component) {
            uint64_t componentState = internal::getComponentElementState(element);
            for (uint64_t choice = choiceIndices[componentState]; choice < choiceIndices[componentState + 1]; ++choice) {
                // If the choice is not contained in the component itself, we have to add a similar distribution to the auxiliary state.
                if (!internal::componentElementChoicesContains(element, choice)) {
                    rhs.push_back(storm::utility::zero<ValueType>());
                    addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
                    if (sspComponentExitChoicesToOriginalMap) {
                        // Later we need to be able to map this choice back to the original input model
                        sspComponentExitChoicesToOriginalMap->emplace_back(componentState, choice - choiceIndices[componentState]);
                    }
                    ++currentSspChoice;
                }
            }
        }
        // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the component.
        rhs.push_back(mecLraValues[componentIndex]);
        if (sspComponentExitChoicesToOriginalMap) {
            // Insert some invalid values so we can later detect that this choice is not an exit choice
            sspComponentExitChoicesToOriginalMap->emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
        }
        ++currentSspChoice;
    }
    return std::make_pair(sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates), std::move(rhs));
}

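// Translates the scheduler choices obtained for the SSP back into choices of the original model.
// Non-component states simply copy their SSP choice; for each MEC, the SSP choice determines whether the scheduler stays inside the
// MEC (keeping the choices set by computeLraForComponent) or leaves it via a specific exit state and choice.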
template<typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::constructOptimalChoices(
    std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap,
    storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates,
    std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap) {
    // We first take care of non-MEC states
    storm::utility::vector::setVectorValues(this->_producedOptimalChoices.get(), statesNotInComponent, sspChoices);
    // Secondly, we consider MEC states. There are 3 cases for each MEC state:
    // 1. The SSP choices encode that we want to stay in the MEC
    // 2. The SSP choices encode that we want to leave the MEC and
    //    a) we take an exit (non-MEC) choice at the given state
    //    b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
    uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfNonComponentStates];
    for (auto const& mec : *this->_longRunComponentDecomposition) {
        // Get the sspState of this MEC (using one representative mec state)
        auto const& sspState = inputToSspStateMap[mec.begin()->first];
        uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
        // Obtain the state and choice of the original model to which the selected choice corresponds.
        auto const& originalStateChoice = sspComponentExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
        // Check if we are in Case 1 or 2
        if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
            // The optimal choice is to stay in this mec (Case 1)
            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMec.
            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
        } else {
            // The best choice is to leave this MEC via the selected state and choice. (Case 2)
            // Set the exit choice (Case 2.a)
            this->_producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
            // The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
            // Perform a backwards search from the exit state, only using MEC choices
            // We start by setting an invalid choice to all remaining mec states (so that we can easily detect them as unprocessed)
            for (auto const& stateActions : mec) {
                if (stateActions.first != originalStateChoice.first) {
                    this->_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
                }
            }
            // Ensure that backwards transitions are available
            this->createBackwardTransitions();
            // Now start a backwards DFS
            std::vector<uint64_t> stack = {originalStateChoice.first};
            while (!stack.empty()) {
                uint64_t currentState = stack.back();
                stack.pop_back();
                for (auto const& backwardsTransition : this->_backwardTransitions->getRowGroup(currentState)) {
                    uint64_t predecessorState = backwardsTransition.getColumn();
                    if (mec.containsState(predecessorState)) {
                        auto& selectedPredChoice = this->_producedOptimalChoices.get()[predecessorState];
                        if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
                            // We don't already have a choice for this predecessor.
                            // We now need to check whether there is a *MEC* choice leading to currentState
                            for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
                                for (auto const& forwardTransition : this->_transitionMatrix.getRow(predChoice)) {
                                    if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
                                        // Playing this choice (infinitely often) will lead to current state (infinitely often)!
                                        selectedPredChoice = predChoice - this->_transitionMatrix.getRowGroupIndices()[predecessorState];
                                        stack.push_back(predecessorState);
                                        break;
                                    }
                                }
                                if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) {
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

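// Builds the stochastic shortest path (SSP) problem in which every MEC is collapsed into a single auxiliary state, solves it with a
// min-max linear equation solver, and maps the resulting values (and, if requested, the scheduler) back to the original state space.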
template<typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env,
                                                                                                std::vector<ValueType> const& componentLraValues) {
    STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");

    // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
    // which redirects all transitions leading to a former component state to a new auxiliary state.
    // There will be one auxiliary state for each component. These states will be appended to the end of the matrix.

    // First gather the states that are part of a component
    // and create a mapping from states that lie in a component to the corresponding component index.
    storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
    std::vector<uint64_t> inputToSspStateMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
    for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
        for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
            uint64_t state = internal::getComponentElementState(element);
            statesInComponents.set(state);
            inputToSspStateMap[state] = currentComponentIndex;
        }
    }
    // Now take care of the non-component states. Note that the order of these states will be preserved.
    uint64_t numberOfNonComponentStates = 0;
    storm::storage::BitVector statesNotInComponent = ~statesInComponents;
    for (auto nonComponentState : statesNotInComponent) {
        inputToSspStateMap[nonComponentState] = numberOfNonComponentStates;
        ++numberOfNonComponentStates;
    }
    // Finalize the mapping for the component states which now still assigns component states to their component index.
    // To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
    // number of states that are not in a component.
    for (auto mecState : statesInComponents) {
        inputToSspStateMap[mecState] += numberOfNonComponentStates;
    }

    // For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
    // corresponding choices in the original model.
    std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspComponentExitChoicesToOriginalMap;

    // The next step is to create the SSP matrix and the right-hand side of the SSP.
    auto sspMatrixVector = buildSspMatrixVector(componentLraValues, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates,
                                                this->isProduceSchedulerSet() ? &sspComponentExitChoicesToOriginalMap : nullptr);

    // Set-up a solver
    storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
    storm::solver::MinMaxLinearEquationSolverRequirements requirements =
        minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet());
    requirements.clearBounds();
    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException,
                    "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrixVector.first);
    solver->setHasUniqueSolution();
    solver->setHasNoEndComponents();
    solver->setTrackScheduler(this->isProduceSchedulerSet());
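    // The achievable values lie between the smallest and the largest component value: every policy of the SSP eventually takes one of
    // the "stay" choices, whose reward equals the LRA value of the respective MEC, so these extremes are valid solver bounds.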
    auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
    solver->setLowerBound(*lowerUpperBounds.first);
    solver->setUpperBound(*lowerUpperBounds.second);
    solver->setRequirementsChecked();

    // Solve the equation system
    std::vector<ValueType> x(sspMatrixVector.first.getRowGroupCount());
    solver->solveEquations(env, this->getOptimizationDirection(), x, sspMatrixVector.second);

    // Prepare scheduler (if requested)
    if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
        // Translate result for ssp matrix to original model
        constructOptimalChoices(solver->getSchedulerChoices(), sspMatrixVector.first, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates,
                                sspComponentExitChoicesToOriginalMap);
    } else {
        STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
    }

    // Prepare result vector.
    // For efficiency reasons, we re-use the memory of our rhs for this!
    std::vector<ValueType> result = std::move(sspMatrixVector.second);
    result.resize(this->_transitionMatrix.getRowGroupCount());
    result.shrink_to_fit();
    storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x);
    return result;
}

}  // namespace helper
}  // namespace modelchecker
}  // namespace storm