Storm 1.11.1.1
A Modern Probabilistic Model Checker
GradientDescentInstantiationSearcher.cpp
#include <cmath>
#include <cstdint>
#include <random>

namespace storm {
namespace derivative {

template<typename FunctionType>
using VariableType = typename utility::parametric::VariableType<FunctionType>::type;
template<typename FunctionType>
using CoefficientType = typename utility::parametric::CoefficientType<FunctionType>::type;

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::doStep(
    VariableType<FunctionType> steppingParameter, std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position,
    const std::map<VariableType<FunctionType>, ConstantType>& gradient, uint64_t stepNum) {
    const ConstantType precisionAsConstant =
        utility::convertNumber<ConstantType>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    const CoefficientType<FunctionType> precision =
        storm::utility::convertNumber<CoefficientType<FunctionType>>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    CoefficientType<FunctionType> const oldPos = position[steppingParameter];
    ConstantType const oldPosAsConstant = utility::convertNumber<ConstantType>(position[steppingParameter]);

    ConstantType projectedGradient;
    if (constraintMethod == GradientDescentConstraintMethod::PROJECT_WITH_GRADIENT) {
        // Project gradient
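        // Probe a step of size `precision` in the gradient direction; if the probe would leave
        // the region (or the open unit interval (0, 1) if no region is given), zero the gradient
        // so that this parameter stays put.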
        ConstantType newPlainPosition = oldPosAsConstant + precisionAsConstant * gradient.at(steppingParameter);
        auto const lower =
            region ? utility::convertNumber<ConstantType>(region->getLowerBoundary(steppingParameter)) : utility::zero<ConstantType>() + precisionAsConstant;
        auto const upper =
            region ? utility::convertNumber<ConstantType>(region->getUpperBoundary(steppingParameter)) : utility::one<ConstantType>() - precisionAsConstant;
        if (newPlainPosition < lower || newPlainPosition > upper) {
            projectedGradient = 0;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
        // The parameter lives in an unconstrained space and is mapped into (0, 1) by the logistic
        // sigmoid. By the chain rule, the derivative of f(sigmoid(x)) is f'(sigmoid(x)) * exp(x) / (exp(x) + 1)^2.
        const double expX = std::exp(utility::convertNumber<double>(oldPos));
        projectedGradient = gradient.at(steppingParameter) * utility::convertNumber<ConstantType>(expX / std::pow(expX + 1, 2));
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY) {
        if (oldPosAsConstant < precisionAsConstant) {
            projectedGradient = 1000;
        } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
            projectedGradient = -1000;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {
        // Our barrier is:
        // log(x) if 0 < x < 0.5
        // log(1 - x) if 0.5 <= x < 1
        // -infinity otherwise
        // The gradient of this is
        // 1/x, -1/(1-x), and +/-infinity, respectively
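        // logarithmicBarrierTerm is the barrier weight mu: inside the bounds the barrier
        // contributes roughly mu / x (resp. -mu / (1 - x)) to the projected gradient; outside,
        // the infinite gradient is approximated by +/- 1 / mu below.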
        if (oldPosAsConstant >= precisionAsConstant && oldPosAsConstant <= utility::one<ConstantType>() - precisionAsConstant) {
            /* const double mu = (double) parameters.size() / (double) stepNum; */
            if (oldPosAsConstant * 2 < utility::one<ConstantType>()) {
                projectedGradient = gradient.at(steppingParameter) + logarithmicBarrierTerm / (oldPosAsConstant - precisionAsConstant);
            } else {
                projectedGradient =
                    gradient.at(steppingParameter) - logarithmicBarrierTerm / (utility::one<ConstantType>() - precisionAsConstant - oldPosAsConstant);
            }
        } else {
            if (oldPosAsConstant < precisionAsConstant) {
                projectedGradient = utility::one<ConstantType>() / logarithmicBarrierTerm;
            } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
                projectedGradient = -utility::one<ConstantType>() / logarithmicBarrierTerm;
            }
        }
    } else {
        projectedGradient = gradient.at(steppingParameter);
    }

    // Compute step based on used gradient descent method
    ConstantType step;
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        // For this algorithm, see the various sources available on the Adam algorithm. This implementation should
        // be correct, as it is compared with a run of Keras's Adam optimizer in the test.
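        // In the usual Adam notation, with gradient g_t, beta1 = averageDecay, beta2 = squaredAverageDecay,
        // and eps = precisionAsConstant:
        //   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t            (decayingStepAverage)
        //   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2          (decayingStepAverageSquared)
        //   mHat = m_t / (1 - beta1^(t+1)),  vHat = v_t / (1 - beta2^(t+1))
        //   step = learningRate * mHat / (sqrt(vHat) + eps)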
        adam->decayingStepAverage[steppingParameter] =
            adam->averageDecay * adam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - adam->averageDecay) * projectedGradient;
        adam->decayingStepAverageSquared[steppingParameter] = adam->squaredAverageDecay * adam->decayingStepAverageSquared[steppingParameter] +
                                                              (utility::one<ConstantType>() - adam->squaredAverageDecay) * utility::pow(projectedGradient, 2);

        const ConstantType correctedGradient =
            adam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->averageDecay, stepNum + 1));
        const ConstantType correctedSquaredGradient =
            adam->decayingStepAverageSquared[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->squaredAverageDecay, stepNum + 1));

        const ConstantType toSqrt = correctedSquaredGradient;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (adam->learningRate / (sqrtResult + precisionAsConstant)) * correctedGradient;
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        // You can compare this with the RAdam paper's "Algorithm 2: Rectified Adam".
        // The line numbers and comments are matched.
        // Initializing / Compute Gradient: Already happened.
        // 2: Compute maximum length of approximated simple moving average
        const ConstantType maxLengthApproxSMA = 2 / (utility::one<ConstantType>() - radam->squaredAverageDecay) - utility::one<ConstantType>();
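        // (This is rho_inf = 2 / (1 - beta2) - 1 in the paper's notation.)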

        // 5: Update exponential moving 2nd moment
        radam->decayingStepAverageSquared[steppingParameter] = radam->squaredAverageDecay * radam->decayingStepAverageSquared[steppingParameter] +
                                                               (utility::one<ConstantType>() - radam->squaredAverageDecay) * utility::pow(projectedGradient, 2);
        // 6: Update exponential moving 1st moment
        radam->decayingStepAverage[steppingParameter] =
            radam->averageDecay * radam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - radam->averageDecay) * projectedGradient;
        // 7: Compute bias corrected moving average
        const ConstantType biasCorrectedMovingAverage =
            radam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(radam->averageDecay, stepNum + 1));
        const ConstantType squaredAverageDecayPow = utility::pow(radam->squaredAverageDecay, stepNum + 1);
        // 8: Compute the length of the approximated simple moving average
        const ConstantType lengthApproxSMA =
            maxLengthApproxSMA -
            ((2 * (utility::convertNumber<ConstantType>(stepNum) + utility::one<ConstantType>()) * squaredAverageDecayPow) / (1 - squaredAverageDecayPow));
        // 9: If the variance is tractable, i.e. lengthApproxSMA > 4, then
        if (lengthApproxSMA > 4) {
            // 10: Compute adaptive learning rate
            const ConstantType adaptiveLearningRate =
                constantTypeSqrt((utility::one<ConstantType>() - squaredAverageDecayPow) / radam->decayingStepAverageSquared[steppingParameter]);
            // 11: Compute the variance rectification term
            const ConstantType varianceRectification =
                constantTypeSqrt(((lengthApproxSMA - 4) / (maxLengthApproxSMA - 4)) * ((lengthApproxSMA - 2) / (maxLengthApproxSMA - 2)) *
                                 ((maxLengthApproxSMA) / (lengthApproxSMA)));
            // 12: Update parameters with adaptive momentum
            step = radam->learningRate * varianceRectification * biasCorrectedMovingAverage * adaptiveLearningRate;
        } else {
            // 14: Update parameters with un-adapted momentum
            step = radam->learningRate * biasCorrectedMovingAverage;
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
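        // RMSProp: scale the step by the inverse square root of a decaying average of squared
        // gradients; precisionAsConstant plays the role of the usual epsilon that guards
        // against division by zero.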
        rmsProp->rootMeanSquare[steppingParameter] = rmsProp->averageDecay * rmsProp->rootMeanSquare[steppingParameter] +
                                                     (utility::one<ConstantType>() - rmsProp->averageDecay) * projectedGradient * projectedGradient;

        const ConstantType toSqrt = rmsProp->rootMeanSquare[steppingParameter] + precisionAsConstant;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (rmsProp->learningRate / sqrtResult) * projectedGradient;
    } else if (Plain* plain = boost::get<Plain>(&gradientDescentType)) {
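        // With useSignsOnly, only the sign of the projected gradient is used and a fixed step
        // of +/- learningRate is taken; otherwise the step is learningRate times the gradient.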
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -plain->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = plain->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = plain->learningRate * projectedGradient;
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -momentum->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = momentum->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = momentum->learningRate * projectedGradient;
        }
        step += momentum->momentumTerm * momentum->pastStep.at(steppingParameter);
        momentum->pastStep[steppingParameter] = step;
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
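        // The Nesterov lookahead itself happens in stochasticGradientDescent, where the gradient
        // is evaluated at the momentum-predicted position; the update below is then the same
        // bookkeeping as for classical momentum.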
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -nesterov->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = nesterov->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = nesterov->learningRate * projectedGradient;
        }
        step += nesterov->momentumTerm * nesterov->pastStep.at(steppingParameter);
        nesterov->pastStep[steppingParameter] = step;
    } else {
        STORM_LOG_ERROR("GradientDescentType was not a known one");
    }

    const CoefficientType<FunctionType> convertedStep = utility::convertNumber<CoefficientType<FunctionType>>(step);
    const CoefficientType<FunctionType> newPos = position[steppingParameter] + convertedStep;
    position[steppingParameter] = newPos;
    // Map parameter back to region
    if (constraintMethod == GradientDescentConstraintMethod::PROJECT_WITH_GRADIENT || constraintMethod == GradientDescentConstraintMethod::PROJECT) {
        auto const lower = region ? region->getLowerBoundary(steppingParameter) : utility::zero<CoefficientType<FunctionType>>() + precision;
        auto const upper = region ? region->getUpperBoundary(steppingParameter) : utility::one<CoefficientType<FunctionType>>() - precision;

        position[steppingParameter] = utility::max(lower, position[steppingParameter]);
        position[steppingParameter] = utility::min(upper, position[steppingParameter]);
    }
    return utility::abs<ConstantType>(oldPosAsConstant - utility::convertNumber<ConstantType>(position[steppingParameter]));
}

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::stochasticGradientDescent(
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position) {
    uint_fast64_t initialStateModel = model.getStates("init").getNextSetIndex(0);

    ConstantType currentValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            currentValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            currentValue = utility::infinity<ConstantType>();
            break;
    }

    // We count the number of iterations where the value changes less than the threshold, and terminate if it is large enough.
    uint64_t tinyChangeIterations = 0;

    std::map<VariableType<FunctionType>, ConstantType> deltaVector;

    std::vector<VariableType<FunctionType>> parameterEnumeration;
    for (auto parameter : this->parameters) {
        parameterEnumeration.push_back(parameter);
    }

    utility::Stopwatch printUpdateStopwatch;
    printUpdateStopwatch.start();

    // The index that keeps track of which parameter(s) to consider next.
    // The "mini-batch", i.e. the parameters considered in this step, is parameterNum..parameterNum+miniBatchSize-1.
    uint_fast64_t parameterNum = 0;
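    // For example, with miniBatchSize = 2 and parameters p, q, r, the mini-batches cycle
    // through {p, q}, {r}, {p, q}, ... (parameterNum wraps around at the end of the loop body).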
    for (uint_fast64_t stepNum = 0; true; ++stepNum) {
        if (printUpdateStopwatch.getTimeInSeconds() >= 15) {
            printUpdateStopwatch.restart();
            STORM_PRINT_AND_LOG("Currently at " << currentValue << "\n");
        }

        std::vector<VariableType<FunctionType>> miniBatch;
        for (uint_fast64_t i = parameterNum; i < std::min((uint_fast64_t)parameterEnumeration.size(), parameterNum + miniBatchSize); i++) {
            miniBatch.push_back(parameterEnumeration[i]);
        }

        ConstantType oldValue = currentValue;
        CoefficientType<FunctionType> const precision = storm::utility::convertNumber<CoefficientType<FunctionType>>(
            storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());

        // If Nesterov is enabled, we need to compute the gradient at the predicted position
        std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> nesterovPredictedPosition(position);
        if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
            CoefficientType<FunctionType> const upperBound = (utility::one<CoefficientType<FunctionType>>() - precision);
            for (auto const& parameter : miniBatch) {
                ConstantType const addedTerm = nesterov->momentumTerm * nesterov->pastStep[parameter];
                nesterovPredictedPosition[parameter] += storm::utility::convertNumber<CoefficientType<FunctionType>>(addedTerm);
                nesterovPredictedPosition[parameter] = utility::max(precision, nesterovPredictedPosition[parameter]);
                nesterovPredictedPosition[parameter] = utility::min(upperBound, nesterovPredictedPosition[parameter]);
            }
        }
        if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
            // Apply sigmoid function
            for (auto const& parameter : parameters) {
                nesterovPredictedPosition[parameter] =
                    utility::one<CoefficientType<FunctionType>>() /
                    (utility::one<CoefficientType<FunctionType>>() +
                     utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(nesterovPredictedPosition[parameter]))));
            }
        }

        // Compute the value of our position and terminate if it satisfies the bound or is
        // zero or one when computing probabilities. The "valueVector" (just the probability/expected
        // reward for eventually reaching the target from every state) is also used for computing
        // the gradient later. We only need one computation of the "valueVector" per mini-batch.
        //
        // If Nesterov is activated, we need to do this twice. First, to check the value of the current position.
        // Second, to compute the valueVector at the nesterovPredictedPosition.
        // If Nesterov is deactivated, then nesterovPredictedPosition == position.

        // Are we at a stochastic (in-bounds) position, i.e. do all parameters lie strictly inside (0, 1)?
        bool stochasticPosition = true;
        for (auto const& parameter : parameters) {
            if (nesterovPredictedPosition[parameter] < 0 + precision || nesterovPredictedPosition[parameter] > 1 - precision) {
                stochasticPosition = false;
                break;
            }
        }

        bool computeValue = true;
        if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC || constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY) {
            if (!stochasticPosition) {
                computeValue = false;
            }
        }

        if (computeValue) {
            std::unique_ptr<storm::modelchecker::CheckResult> intermediateResult = instantiationModelChecker->check(env, nesterovPredictedPosition);
            std::vector<ConstantType> valueVector = intermediateResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
            if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
                std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> modelCheckPosition(position);
                if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
                    for (auto const& parameter : parameters) {
                        modelCheckPosition[parameter] =
                            utility::one<CoefficientType<FunctionType>>() /
                            (utility::one<CoefficientType<FunctionType>>() +
                             utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(modelCheckPosition[parameter]))));
                    }
                }
                std::unique_ptr<storm::modelchecker::CheckResult> terminationResult = instantiationModelChecker->check(env, modelCheckPosition);
                std::vector<ConstantType> terminationValueVector = terminationResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
                currentValue = terminationValueVector[initialStateModel];
            } else {
                currentValue = valueVector[initialStateModel];
            }

            if (synthesisTask->getBound().isSatisfied(currentValue) && stochasticPosition) {
                break;
            }

            for (auto const& parameter : miniBatch) {
                auto checkResult = derivativeEvaluationHelper->check(env, nesterovPredictedPosition, parameter, valueVector);
                ConstantType delta = checkResult->getValueVector()[derivativeEvaluationHelper->getInitialState()];
                if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                    synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                    delta = -delta;
                }
                deltaVector[parameter] = delta;
            }
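            // Note the sign flip above: for Less/LessEqual bounds the search minimizes, so
            // negating the derivative lets doStep always move in the direction of improvement.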
        } else {
            if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                currentValue = utility::infinity<ConstantType>();
            } else {
                currentValue = -utility::infinity<ConstantType>();
            }
        }

        // Log position and probability information for later use in visualizing the descent, if desired.
        if (recordRun) {
            VisualizationPoint point;
            point.position = nesterovPredictedPosition;
            point.value = currentValue;
            walk.push_back(point);
        }

        // Perform the step. The actualChange is the change in position the step caused. This can
        // differ from the delta in several ways: first, the delta is scaled by the learning rate
        // (and whatever else the chosen descent method does); second, if the position sits at the
        // epsilon boundary and the delta would step out of the constrained region, the step is
        // clipped, so the actualChange may be zero while the delta is not.
        for (auto const& parameter : miniBatch) {
            doStep(parameter, position, deltaVector, stepNum);
        }

        if (storm::utility::abs<ConstantType>(oldValue - currentValue) < terminationEpsilon) {
            tinyChangeIterations += miniBatch.size();
            if (tinyChangeIterations > parameterEnumeration.size()) {
                break;
            }
        } else {
            tinyChangeIterations = 0;
        }

        // Consider the next parameter
        parameterNum = parameterNum + miniBatchSize;
        if (parameterNum >= parameterEnumeration.size()) {
            parameterNum = 0;
        }

        if (storm::utility::resources::isTerminate()) {
            STORM_LOG_WARN("Aborting Gradient Descent, returning non-optimal value.");
            break;
        }
    }
    return currentValue;
}

template<typename FunctionType, typename ConstantType>
std::pair<std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>, ConstantType>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::gradientDescent() {
    STORM_LOG_ASSERT(this->synthesisTask, "Call setup before calling gradientDescent");

    resetDynamicValues();

    STORM_LOG_ASSERT(this->synthesisTask->isBoundSet(), "Task does not involve a bound.");

    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> bestInstantiation;
    ConstantType bestValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            bestValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            bestValue = utility::infinity<ConstantType>();
            break;
    }

    std::random_device device;
    std::default_random_engine engine(device());
    std::uniform_real_distribution<> dist(0, 1);
    bool initialGuess = true;
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> point;
    while (true) {
        STORM_PRINT_AND_LOG("Trying out a new starting point\n");
        if (initialGuess) {
            STORM_PRINT_AND_LOG("Trying initial guess (p -> 0.5 for every parameter p, or the start point if one was set)\n");
        }
        // Generate random starting point
        for (auto const& param : this->parameters) {
            if (initialGuess) {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                if (startPoint) {
                    point[param] = (*startPoint)[param];
                } else {
                    point[param] = utility::convertNumber<CoefficientType<FunctionType>>(0.5 + 1e-6);
                }
            } else if (!initialGuess && constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC &&
                       logarithmicBarrierTerm > utility::convertNumber<ConstantType>(0.00001)) {
                // Do nothing: keep the current point and retry with a smaller barrier term.
            } else {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                point[param] = utility::convertNumber<CoefficientType<FunctionType>>(dist(engine));
            }
        }
        initialGuess = false;

        /* walk.clear(); */

        stochasticWatch.start();
        STORM_PRINT_AND_LOG("Starting at " << point << "\n");
        ConstantType prob = stochasticGradientDescent(point);
        stochasticWatch.stop();

        bool isFoundPointBetter = false;
        switch (this->synthesisTask->getBound().comparisonType) {
            case logic::ComparisonType::Greater:
            case logic::ComparisonType::GreaterEqual:
                isFoundPointBetter = prob > bestValue;
                break;
            case logic::ComparisonType::Less:
            case logic::ComparisonType::LessEqual:
                isFoundPointBetter = prob < bestValue;
                break;
        }
        if (isFoundPointBetter) {
            bestInstantiation = point;
            bestValue = prob;
        }

        if (synthesisTask->getBound().isSatisfied(bestValue)) {
            STORM_PRINT_AND_LOG("Aborting because the bound is satisfied\n");
            break;
        } else if (storm::utility::resources::isTerminate()) {
            break;
        } else {
            if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {
                logarithmicBarrierTerm = logarithmicBarrierTerm / 10;
                STORM_PRINT_AND_LOG("Smaller term\n" << bestValue << "\n" << logarithmicBarrierTerm << "\n");
                continue;
            }
            STORM_PRINT_AND_LOG("Sorry, couldn't satisfy the bound (yet). Best found value so far: " << bestValue << "\n");
            continue;
        }
    }

    if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
        // Apply sigmoid function
        for (auto const& parameter : parameters) {
            bestInstantiation[parameter] =
                utility::one<CoefficientType<FunctionType>>() /
                (utility::one<CoefficientType<FunctionType>>() +
                 utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(bestInstantiation[parameter]))));
        }
    }

    return std::make_pair(bestInstantiation, bestValue);
}
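
// A minimal usage sketch (hypothetical; construction and setup arguments elided): once a
// synthesis task with a bound has been set,
//   auto result = searcher.gradientDescent();
// yields the best instantiation found together with its value; if recordRun was enabled,
// searcher.getVisualizationWalk() returns the recorded descent afterwards.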

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::resetDynamicValues() {
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            adam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            adam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            radam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            radam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            rmsProp->rootMeanSquare[parameter] = utility::zero<ConstantType>();
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            momentum->pastStep[parameter] = utility::zero<ConstantType>();
        }
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            nesterov->pastStep[parameter] = utility::zero<ConstantType>();
        }
    }
}

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::printRunAsJson() {
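    // The walk is printed as a JSON array; a hypothetical run with one parameter p might yield
    //   [{"p":0.5,"value":0.3},{"p":0.62,"value":0.41}]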
    STORM_PRINT("[");
    for (auto s = walk.begin(); s != walk.end(); ++s) {
        STORM_PRINT("{");
        auto point = s->position;
        for (auto iter = point.begin(); iter != point.end(); ++iter) {
            STORM_PRINT("\"" << iter->first.name() << "\"");
            STORM_PRINT(":" << utility::convertNumber<double>(iter->second) << ",");
        }
        STORM_PRINT("\"value\":" << s->value << "}");
        if (std::next(s) != walk.end()) {
            STORM_PRINT(",");
        }
    }
    STORM_PRINT("]\n");
    // Print value at last step for data collection
    STORM_PRINT(storm::utility::convertNumber<double>(walk.at(walk.size() - 1).value) << "\n");
}

template<typename FunctionType, typename ConstantType>
std::vector<typename GradientDescentInstantiationSearcher<FunctionType, ConstantType>::VisualizationPoint>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::getVisualizationWalk() {
    return walk;
}

} // namespace derivative
} // namespace storm