Storm 1.11.1.1
A Modern Probabilistic Model Checker
GradientDescentInstantiationSearcher.cpp
Go to the documentation of this file.
#include <cmath>
#include <random>

namespace storm {
namespace derivative {

template<typename FunctionType>
using VariableType = typename utility::parametric::VariableType<FunctionType>::type;
template<typename FunctionType>
using CoefficientType = typename utility::parametric::CoefficientType<FunctionType>::type;

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::doStep(
    VariableType<FunctionType> steppingParameter, std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position,
    const std::map<VariableType<FunctionType>, ConstantType>& gradient, uint64_t stepNum) {
    const ConstantType precisionAsConstant =
        utility::convertNumber<ConstantType>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    const CoefficientType<FunctionType> precision =
        storm::utility::convertNumber<CoefficientType<FunctionType>>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    CoefficientType<FunctionType> const oldPos = position[steppingParameter];
    ConstantType const oldPosAsConstant = utility::convertNumber<ConstantType>(position[steppingParameter]);

    ConstantType projectedGradient;
    if (constraintMethod == GradientDescentConstraintMethod::PROJECT_WITH_GRADIENT) {  // opening branch reconstructed; enum value assumed
        // Project gradient
        ConstantType newPlainPosition = oldPosAsConstant + precisionAsConstant * gradient.at(steppingParameter);
        auto const lower =
            region ? utility::convertNumber<ConstantType>(region->getLowerBoundary(steppingParameter)) : utility::zero<ConstantType>() + precisionAsConstant;
        auto const upper =
            region ? utility::convertNumber<ConstantType>(region->getUpperBoundary(steppingParameter)) : utility::one<ConstantType>() - precisionAsConstant;
        if (newPlainPosition < lower || newPlainPosition > upper) {
            projectedGradient = 0;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
        // We want the derivative of f(sigmoid(x)), which is exp(x) * f'(sigmoid(x)) / (exp(x) + 1)^2
        const double expX = std::exp(utility::convertNumber<double>(oldPos));
        projectedGradient = gradient.at(steppingParameter) * utility::convertNumber<ConstantType>(expX / std::pow(expX + 1, 2));
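        // Chain rule check: the optimizer works on the unconstrained coordinate x, while the model is instantiated at
        // sigmoid(x) = 1 / (1 + exp(-x)), so
        //   d/dx f(sigmoid(x)) = f'(sigmoid(x)) * sigmoid'(x)   with   sigmoid'(x) = exp(x) / (exp(x) + 1)^2,
        // which is exactly the factor expX / pow(expX + 1, 2) applied above.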
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY) {
        if (oldPosAsConstant < precisionAsConstant) {
            projectedGradient = 1000;
        } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
            projectedGradient = -1000;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {
        // Our barrier is:
        // log(x) if 0 < x < 0.5
        // log(1 - x) if 0.5 <= x < 1
        // -infinity otherwise
        // The gradient of this is
        // 1/x, -1/(1 - x), +/-infinity respectively
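        // With barrier weight mu = logarithmicBarrierTerm, the penalized objective is f(x) + mu * log(x) for x < 0.5 and
        // f(x) + mu * log(1 - x) for x >= 0.5, so its gradient is f'(x) + mu / x respectively f'(x) - mu / (1 - x); the code
        // below measures the distance to the precision-shifted boundaries (x - eps and 1 - eps - x) instead of x and 1 - x.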
        if (oldPosAsConstant >= precisionAsConstant && oldPosAsConstant <= utility::one<ConstantType>() - precisionAsConstant) {
            /* const double mu = (double) parameters.size() / (double) stepNum; */
            if (oldPosAsConstant * 2 < utility::one<ConstantType>()) {
                projectedGradient = gradient.at(steppingParameter) + logarithmicBarrierTerm / (oldPosAsConstant - precisionAsConstant);
            } else {
                projectedGradient =
                    gradient.at(steppingParameter) - logarithmicBarrierTerm / (utility::one<ConstantType>() - precisionAsConstant - oldPosAsConstant);
            }
        } else {
            if (oldPosAsConstant < precisionAsConstant) {
                projectedGradient = utility::one<ConstantType>() / logarithmicBarrierTerm;
            } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
                projectedGradient = -utility::one<ConstantType>() / logarithmicBarrierTerm;
            }
        }
    } else {
        projectedGradient = gradient.at(steppingParameter);
    }

    // Compute step based on used gradient descent method
    ConstantType step;
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        // For this algorithm, see the various sources available on the ADAM algorithm. This implementation should
        // be correct, as it is compared with a run of keras's ADAM optimizer in the test.
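        // In the usual ADAM notation (beta1 = averageDecay, beta2 = squaredAverageDecay, g = projectedGradient,
        // eps = precisionAsConstant, t = stepNum + 1):
        //   m_t  = beta1 * m_{t-1} + (1 - beta1) * g          (decayingStepAverage)
        //   v_t  = beta2 * v_{t-1} + (1 - beta2) * g^2        (decayingStepAverageSquared)
        //   mHat = m_t / (1 - beta1^t),   vHat = v_t / (1 - beta2^t)
        //   step = learningRate * mHat / (sqrt(vHat) + eps)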
        adam->decayingStepAverage[steppingParameter] =
            adam->averageDecay * adam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - adam->averageDecay) * projectedGradient;
        adam->decayingStepAverageSquared[steppingParameter] = adam->squaredAverageDecay * adam->decayingStepAverageSquared[steppingParameter] +
                                                              (utility::one<ConstantType>() - adam->squaredAverageDecay) * utility::pow(projectedGradient, 2);

        const ConstantType correctedGradient =
            adam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->averageDecay, stepNum + 1));
        const ConstantType correctedSquaredGradient =
            adam->decayingStepAverageSquared[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->squaredAverageDecay, stepNum + 1));

        const ConstantType toSqrt = correctedSquaredGradient;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (adam->learningRate / (sqrtResult + precisionAsConstant)) * correctedGradient;
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        // You can compare this with the RAdam paper's "Algorithm 2: Rectified Adam".
        // The line numbers and comments are matched.
        // Initializing / Compute Gradient: Already happened.
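        // In the paper's notation (beta2 = squaredAverageDecay, t = stepNum + 1):
        //   rho_inf = 2 / (1 - beta2) - 1                                  (maxLengthApproxSMA)
        //   rho_t   = rho_inf - 2 * t * beta2^t / (1 - beta2^t)            (lengthApproxSMA)
        //   r_t     = sqrt( ((rho_t - 4)(rho_t - 2) rho_inf) / ((rho_inf - 4)(rho_inf - 2) rho_t) )   (varianceRectification)
        // and the rectified update is only used when rho_t > 4.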
        // 2: Compute maximum length of approximated simple moving average
        const ConstantType maxLengthApproxSMA = 2 / (utility::one<ConstantType>() - radam->squaredAverageDecay) - utility::one<ConstantType>();

        // 5: Update exponential moving 2nd moment
        radam->decayingStepAverageSquared[steppingParameter] = radam->squaredAverageDecay * radam->decayingStepAverageSquared[steppingParameter] +
                                                               (utility::one<ConstantType>() - radam->squaredAverageDecay) * utility::pow(projectedGradient, 2);
        // 6: Update exponential moving 1st moment
        radam->decayingStepAverage[steppingParameter] =
            radam->averageDecay * radam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - radam->averageDecay) * projectedGradient;
        // 7: Compute bias corrected moving average
        const ConstantType biasCorrectedMovingAverage =
            radam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(radam->averageDecay, stepNum + 1));
        const ConstantType squaredAverageDecayPow = utility::pow(radam->squaredAverageDecay, stepNum + 1);
        // 8: Compute the length of the approximated simple moving average
        const ConstantType lengthApproxSMA =
            maxLengthApproxSMA -
            ((2 * (utility::convertNumber<ConstantType>(stepNum) + utility::one<ConstantType>()) * squaredAverageDecayPow) / (1 - squaredAverageDecayPow));
        // 9: If the variance is tractable, i.e. lengthApproxSMA > 4, then
        if (lengthApproxSMA > 4) {
            // 10: Compute adaptive learning rate
            const ConstantType adaptiveLearningRate =
                constantTypeSqrt((utility::one<ConstantType>() - squaredAverageDecayPow) / radam->decayingStepAverageSquared[steppingParameter]);
            // 11: Compute the variance rectification term
            const ConstantType varianceRectification =
                constantTypeSqrt(((lengthApproxSMA - 4) / (maxLengthApproxSMA - 4)) * ((lengthApproxSMA - 2) / (maxLengthApproxSMA - 2)) *
                                 ((maxLengthApproxSMA) / (lengthApproxSMA)));
            // 12: Update parameters with adaptive momentum
            step = radam->learningRate * varianceRectification * biasCorrectedMovingAverage * adaptiveLearningRate;
        } else {
            // 14: Update parameters with un-adapted momentum
            step = radam->learningRate * biasCorrectedMovingAverage;
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
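        // RMSProp keeps a decaying average of squared gradients and divides the step by its square root:
        //   E[g^2]_t = gamma * E[g^2]_{t-1} + (1 - gamma) * g^2    (rootMeanSquare, gamma = averageDecay)
        //   step     = learningRate * g / sqrt(E[g^2]_t + eps)     (eps = precisionAsConstant)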
        rmsProp->rootMeanSquare[steppingParameter] = rmsProp->averageDecay * rmsProp->rootMeanSquare[steppingParameter] +
                                                     (utility::one<ConstantType>() - rmsProp->averageDecay) * projectedGradient * projectedGradient;

        const ConstantType toSqrt = rmsProp->rootMeanSquare[steppingParameter] + precisionAsConstant;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (rmsProp->learningRate / sqrtResult) * projectedGradient;
    } else if (Plain* plain = boost::get<Plain>(&gradientDescentType)) {
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -plain->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = plain->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = plain->learningRate * projectedGradient;
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
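        // Classical momentum: the new step is the (possibly sign-only) gradient step plus a fraction of the previous step,
        //   step_t = learningRate * g + momentumTerm * step_{t-1},
        // and step_t is remembered in pastStep for the next iteration.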
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -momentum->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = momentum->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = momentum->learningRate * projectedGradient;
        }
        step += momentum->momentumTerm * momentum->pastStep.at(steppingParameter);
        momentum->pastStep[steppingParameter] = step;
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
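        // The Nesterov variant uses the same update as classical momentum here; the "look-ahead" happens in
        // stochasticGradientDescent, where the gradient is evaluated at the position predicted by the previous momentum step.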
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -nesterov->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = nesterov->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = nesterov->learningRate * projectedGradient;
        }
        step += nesterov->momentumTerm * nesterov->pastStep.at(steppingParameter);
        nesterov->pastStep[steppingParameter] = step;
    } else {
        STORM_LOG_ERROR("GradientDescentType was not a known one");
    }

    const CoefficientType<FunctionType> convertedStep = utility::convertNumber<CoefficientType<FunctionType>>(step);
    const CoefficientType<FunctionType> newPos = position[steppingParameter] + convertedStep;
    position[steppingParameter] = newPos;
    // Map parameter back to region
    if (constraintMethod != GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {  // guard reconstructed from context; exact condition assumed
        auto const lower = region ? region->getLowerBoundary(steppingParameter) : utility::zero<CoefficientType<FunctionType>>() + precision;
        auto const upper = region ? region->getUpperBoundary(steppingParameter) : utility::one<CoefficientType<FunctionType>>() - precision;

        position[steppingParameter] = utility::max(lower, position[steppingParameter]);
        position[steppingParameter] = utility::min(upper, position[steppingParameter]);
    }
    return utility::abs<ConstantType>(oldPosAsConstant - utility::convertNumber<ConstantType>(position[steppingParameter]));
}

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::stochasticGradientDescent(
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position) {
    uint_fast64_t initialStateModel = model.getStates("init").getNextSetIndex(0);

    ConstantType currentValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            currentValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            currentValue = utility::infinity<ConstantType>();
            break;
    }

    // We count the number of iterations where the value changes less than the threshold, and terminate if it is large enough.
    uint64_t tinyChangeIterations = 0;

    std::map<VariableType<FunctionType>, ConstantType> deltaVector;

    std::vector<VariableType<FunctionType>> parameterEnumeration;
    for (auto parameter : this->parameters) {
        parameterEnumeration.push_back(parameter);
    }

    utility::Stopwatch printUpdateStopwatch;
    printUpdateStopwatch.start();

    // The index to keep track of what parameter(s) to consider next.
    // The "mini-batch", i.e. the parameters to consider, are parameterNum..parameterNum+miniBatchSize-1
    uint_fast64_t parameterNum = 0;
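    // For example (hypothetical numbers): with 5 parameters and miniBatchSize = 2, the batches are the parameters with
    // indices {0,1}, {2,3}, {4}, after which parameterNum wraps around to 0 again.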
    for (uint_fast64_t stepNum = 0; true; ++stepNum) {
        if (printUpdateStopwatch.getTimeInSeconds() >= 15) {
            printUpdateStopwatch.restart();
            STORM_PRINT_AND_LOG("Currently at " << currentValue << "\n");
        }

        std::vector<VariableType<FunctionType>> miniBatch;
        for (uint_fast64_t i = parameterNum; i < std::min((uint_fast64_t)parameterEnumeration.size(), parameterNum + miniBatchSize); i++) {
            miniBatch.push_back(parameterEnumeration[i]);
        }

        ConstantType oldValue = currentValue;
        CoefficientType<FunctionType> const precision = storm::utility::convertNumber<CoefficientType<FunctionType>>(
            storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());

        // If nesterov is enabled, we need to compute the gradient on the predicted position
        std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> nesterovPredictedPosition(position);
        if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
            CoefficientType<FunctionType> const upperBound = (utility::one<CoefficientType<FunctionType>>() - precision);
            for (auto const& parameter : miniBatch) {
                ConstantType const addedTerm = nesterov->momentumTerm * nesterov->pastStep[parameter];
                nesterovPredictedPosition[parameter] += storm::utility::convertNumber<CoefficientType<FunctionType>>(addedTerm);
                nesterovPredictedPosition[parameter] = utility::max(precision, nesterovPredictedPosition[parameter]);
                nesterovPredictedPosition[parameter] = utility::min(upperBound, nesterovPredictedPosition[parameter]);
            }
        }
        if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {  // guard reconstructed from context
            // Apply sigmoid function
            for (auto const& parameter : parameters) {
                nesterovPredictedPosition[parameter] =
                    utility::one<CoefficientType<FunctionType>>() /
                    (utility::one<CoefficientType<FunctionType>>() +
                     utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(nesterovPredictedPosition[parameter]))));
            }
        }

        // Compute the value of our position and terminate if it satisfies the bound or is
        // zero or one when computing probabilities. The "valueVector" (just the probability/expected
        // reward for eventually reaching the target from every state) is also used for computing
        // the gradient later. We only need one computation of the "valueVector" per mini-batch.
        //
        // If nesterov is activated, we need to do this twice. First, to check the value of the current position.
        // Second, to compute the valueVector at the nesterovPredictedPosition.
        // If nesterov is deactivated, then nesterovPredictedPosition == position.

        // Are we at a stochastic (in bounds) position?
        bool stochasticPosition = true;
        for (auto const& parameter : parameters) {
            if (nesterovPredictedPosition[parameter] < 0 + precision || nesterovPredictedPosition[parameter] > 1 - precision) {
                stochasticPosition = false;
                break;
            }
        }

        bool computeValue = true;
        if (constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY ||
            constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {  // guard reconstructed from context; exact condition assumed
            if (!stochasticPosition) {
                computeValue = false;
            }
        }

        if (computeValue) {
            std::unique_ptr<storm::modelchecker::CheckResult> intermediateResult = instantiationModelChecker->check(env, nesterovPredictedPosition);
            std::vector<ConstantType> valueVector = intermediateResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
            if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
                std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> modelCheckPosition(position);
                if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {  // guard reconstructed from context
                    for (auto const& parameter : parameters) {
                        modelCheckPosition[parameter] =
                            utility::one<CoefficientType<FunctionType>>() /
                            (utility::one<CoefficientType<FunctionType>>() +
                             utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(modelCheckPosition[parameter]))));
                    }
                }
                std::unique_ptr<storm::modelchecker::CheckResult> terminationResult = instantiationModelChecker->check(env, modelCheckPosition);
                std::vector<ConstantType> terminationValueVector = terminationResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
                currentValue = terminationValueVector[initialStateModel];
            } else {
                currentValue = valueVector[initialStateModel];
            }

            if (synthesisTask->getBound().isSatisfied(currentValue) && stochasticPosition) {
                break;
            }

            for (auto const& parameter : miniBatch) {
                auto checkResult = derivativeEvaluationHelper->check(env, nesterovPredictedPosition, parameter, valueVector);
                ConstantType delta = checkResult->getValueVector()[derivativeEvaluationHelper->getInitialState()];
                if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                    synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                    delta = -delta;
                }
                deltaVector[parameter] = delta;
            }
        } else {
            if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                currentValue = utility::infinity<ConstantType>();
            } else {
                currentValue = -utility::infinity<ConstantType>();
            }
        }

        // Log position and probability information for later use in visualizing the descent, if wished.
        if (recordRun) {
            VisualizationPoint point;
            point.position = nesterovPredictedPosition;
            point.value = currentValue;
            walk.push_back(point);
        }

        // Perform the step. The actualChange returned by doStep is the change in position that the step caused. It differs
        // from the delta in two ways: First, it is scaled by the learning rate and the optimizer's internal state. Second,
        // if the current position is at the epsilon boundary and the delta would step out of the constrained region, the
        // step is corrected, so the actualChange is the change from the last to the corrected position (and may be zero
        // even though the delta is not).
        for (auto const& parameter : miniBatch) {
            doStep(parameter, position, deltaVector, stepNum);
        }

        if (storm::utility::abs<ConstantType>(oldValue - currentValue) < terminationEpsilon) {
            tinyChangeIterations += miniBatch.size();
            if (tinyChangeIterations > parameterEnumeration.size()) {
                break;
            }
        } else {
            tinyChangeIterations = 0;
        }

        // Consider the next parameter
        parameterNum = parameterNum + miniBatchSize;
        if (parameterNum >= parameterEnumeration.size()) {
            parameterNum = 0;
        }

        if (storm::utility::resources::isTerminate()) {  // condition reconstructed: abort on external termination signal
            STORM_LOG_WARN("Aborting Gradient Descent, returning non-optimal value.");
            break;
        }
    }
    return currentValue;
}

template<typename FunctionType, typename ConstantType>
std::pair<std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>, ConstantType>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::gradientDescent() {
    STORM_LOG_ASSERT(this->synthesisTask, "Call setup before calling gradientDescent");

    resetDynamicValues();

    STORM_LOG_ASSERT(this->synthesisTask->isBoundSet(), "Task does not involve a bound.");

    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> bestInstantiation;
    ConstantType bestValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            bestValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            bestValue = utility::infinity<ConstantType>();
            break;
    }

    std::random_device device;
    std::default_random_engine engine(device());
    std::uniform_real_distribution<> dist(0, 1);
    bool initialGuess = true;
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> point;
    while (true) {
        STORM_PRINT_AND_LOG("Trying out a new starting point\n");
        if (initialGuess) {
            STORM_PRINT_AND_LOG("Trying initial guess (p->0.5 for every parameter p or set start point)\n");
        }
        // Generate random starting point
        for (auto const& param : this->parameters) {
            if (initialGuess) {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                if (startPoint) {
                    point[param] = (*startPoint)[param];
                } else {
                    point[param] = utility::convertNumber<CoefficientType<FunctionType>>(0.5 + 1e-6);
                }
            } else if (!initialGuess && constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC &&
                       logarithmicBarrierTerm > utility::convertNumber<ConstantType>(0.00001)) {
                // Do nothing
            } else {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                point[param] = utility::convertNumber<CoefficientType<FunctionType>>(dist(engine));
            }
        }
        initialGuess = false;

        /* walk.clear(); */

        stochasticWatch.start();
        STORM_PRINT_AND_LOG("Starting at " << point << "\n");
        ConstantType prob = stochasticGradientDescent(point);
        stochasticWatch.stop();

        bool isFoundPointBetter = false;
        switch (this->synthesisTask->getBound().comparisonType) {
            case logic::ComparisonType::Greater:
            case logic::ComparisonType::GreaterEqual:
                isFoundPointBetter = prob > bestValue;
                break;
            case logic::ComparisonType::Less:
            case logic::ComparisonType::LessEqual:
                isFoundPointBetter = prob < bestValue;
                break;
        }
        if (isFoundPointBetter) {
            bestInstantiation = point;
            bestValue = prob;
        }

        if (synthesisTask->getBound().isSatisfied(bestValue)) {
            STORM_PRINT_AND_LOG("Aborting because the bound is satisfied\n");
            break;
        } else if (storm::utility::resources::isTerminate()) {  // condition reconstructed (assumed)
            break;
        } else {
            if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {  // guard reconstructed (assumed)
                logarithmicBarrierTerm = logarithmicBarrierTerm / 10;
                STORM_PRINT_AND_LOG("Smaller term\n" << bestValue << "\n" << logarithmicBarrierTerm << "\n");
                continue;
            }
            STORM_PRINT_AND_LOG("Sorry, couldn't satisfy the bound (yet). Best found value so far: " << bestValue << "\n");
            continue;
        }
    }

    if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {  // guard reconstructed from context
        // Apply sigmoid function
        for (auto const& parameter : parameters) {
            bestInstantiation[parameter] =
                utility::one<CoefficientType<FunctionType>>() /
                (utility::one<CoefficientType<FunctionType>>() +
                 utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(bestInstantiation[parameter]))));
        }
    }

    return std::make_pair(bestInstantiation, bestValue);
}

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::resetDynamicValues() {
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            adam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            adam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            radam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            radam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            rmsProp->rootMeanSquare[parameter] = utility::zero<ConstantType>();
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            momentum->pastStep[parameter] = utility::zero<ConstantType>();
        }
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            nesterov->pastStep[parameter] = utility::zero<ConstantType>();
        }
    }
}

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::printRunAsJson() {  // signature reconstructed; name assumed
    STORM_PRINT("[");
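    // The loop below emits one JSON object per recorded step, each parameter as a key plus the model checking result
    // under "value". With hypothetical parameter names and values the printed line looks like:
    //   [{"p":0.5,"q":0.7,"value":0.25},{"p":0.45,"q":0.72,"value":0.31}]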
    for (auto s = walk.begin(); s != walk.end(); ++s) {
        STORM_PRINT("{");
        auto point = s->position;
        for (auto iter = point.begin(); iter != point.end(); ++iter) {
            STORM_PRINT("\"" << iter->first.name() << "\"");
            STORM_PRINT(":" << utility::convertNumber<double>(iter->second) << ",");
        }
        STORM_PRINT("\"value\":" << s->value << "}");
        if (std::next(s) != walk.end()) {
            STORM_PRINT(",");
        }
    }
    STORM_PRINT("]\n");
    // Print value at last step for data collection
    STORM_PRINT(storm::utility::convertNumber<double>(walk.at(walk.size() - 1).value) << "\n");
}

template<typename FunctionType, typename ConstantType>
std::vector<typename GradientDescentInstantiationSearcher<FunctionType, ConstantType>::VisualizationPoint>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::getVisualizationWalk() {
    // Return the recorded walk.
    return walk;
}

}  // namespace derivative
}  // namespace storm