// GradientDescentInstantiationSearcher.cpp
// Part of Storm, a modern probabilistic model checker.
#include <cmath>
#include <cstdint>
#include <random>

namespace storm {
namespace derivative {

template<typename FunctionType>
using VariableType = typename utility::parametric::VariableType<FunctionType>::type;
template<typename FunctionType>
using CoefficientType = typename utility::parametric::CoefficientType<FunctionType>::type;

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::doStep(
    VariableType<FunctionType> steppingParameter, std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position,
    const std::map<VariableType<FunctionType>, ConstantType>& gradient, uint64_t stepNum) {
    const ConstantType precisionAsConstant =
        utility::convertNumber<ConstantType>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    const CoefficientType<FunctionType> precision =
        storm::utility::convertNumber<CoefficientType<FunctionType>>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());
    CoefficientType<FunctionType> const oldPos = position[steppingParameter];
    ConstantType const oldPosAsConstant = utility::convertNumber<ConstantType>(position[steppingParameter]);

    ConstantType projectedGradient;
    if (constraintMethod == GradientDescentConstraintMethod::PROJECT_WITH_GRADIENT) {
        // Project gradient
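        // Idea: take a trial step of size precision * gradient; if even that small step would leave
        // [precision, 1 - precision], zero out the gradient so the parameter stays inside (0, 1).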
        ConstantType newPlainPosition = oldPosAsConstant + precisionAsConstant * gradient.at(steppingParameter);
        if (newPlainPosition < utility::zero<ConstantType>() + precisionAsConstant || newPlainPosition > utility::one<ConstantType>() - precisionAsConstant) {
            projectedGradient = 0;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
        // We want the derivative of f(sigmoid(x)), which happens to be exp(x) * f'(sigmoid(x)) / (exp(x) + 1)^2
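        // (Recall sigmoid(x) = 1 / (1 + exp(-x)) with derivative sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
        //  = exp(x) / (exp(x) + 1)^2, which is exactly the factor applied below.)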
        const double expX = std::exp(utility::convertNumber<double>(oldPos));
        projectedGradient = gradient.at(steppingParameter) * utility::convertNumber<ConstantType>(expX / std::pow(expX + 1, 2));
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY) {
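        // Outside of (0, 1), use a large constant gradient (+/-1000) as a stand-in for the
        // "infinite" barrier pushing the parameter back into the feasible region.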
        if (oldPosAsConstant < precisionAsConstant) {
            projectedGradient = 1000;
        } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
            projectedGradient = -1000;
        } else {
            projectedGradient = gradient.at(steppingParameter);
        }
    } else if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC) {
        // Our barrier is:
        // log(x) if 0 < x < 0.5
        // log(1 - x) if 0.5 <= x < 1
        // -infinity otherwise
        // The gradient of this is
        // 1/x, -1/(1-x), +/-infinity respectively
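        // logarithmicBarrierTerm is the barrier weight: it scales the barrier gradient and is divided
        // by 10 in the outer loop of gradientDescent while the bound is not yet satisfied, so the
        // barrier's influence fades over time.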
        if (oldPosAsConstant >= precisionAsConstant && oldPosAsConstant <= utility::one<ConstantType>() - precisionAsConstant) {
            /* const double mu = (double) parameters.size() / (double) stepNum; */
            if (oldPosAsConstant * 2 < utility::one<ConstantType>()) {
                projectedGradient = gradient.at(steppingParameter) + logarithmicBarrierTerm / (oldPosAsConstant - precisionAsConstant);
            } else {
                projectedGradient =
                    gradient.at(steppingParameter) - logarithmicBarrierTerm / (utility::one<ConstantType>() - precisionAsConstant - oldPosAsConstant);
            }
        } else {
            if (oldPosAsConstant < precisionAsConstant) {
                projectedGradient = utility::one<ConstantType>() / logarithmicBarrierTerm;
            } else if (oldPosAsConstant > utility::one<ConstantType>() - precisionAsConstant) {
                projectedGradient = -utility::one<ConstantType>() / logarithmicBarrierTerm;
            }
        }
    } else {
        projectedGradient = gradient.at(steppingParameter);
    }

    // Compute step based on used gradient descent method
    ConstantType step;
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        // For this algorithm, see the various sources available on the ADAM algorithm. This implementation should
        // be correct, as it is compared with a run of keras's ADAM optimizer in the test.
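        // In the usual notation: m_t = beta1 * m_{t-1} + (1 - beta1) * g_t (decayingStepAverage) and
        // v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2 (decayingStepAverageSquared), with bias corrections
        // m_t / (1 - beta1^t) and v_t / (1 - beta2^t); the step is lr * mHat / (sqrt(vHat) + eps),
        // where eps is precisionAsConstant here.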
        adam->decayingStepAverage[steppingParameter] =
            adam->averageDecay * adam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - adam->averageDecay) * projectedGradient;
        adam->decayingStepAverageSquared[steppingParameter] = adam->squaredAverageDecay * adam->decayingStepAverageSquared[steppingParameter] +
                                                              (utility::one<ConstantType>() - adam->squaredAverageDecay) * utility::pow(projectedGradient, 2);

        const ConstantType correctedGradient =
            adam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->averageDecay, stepNum + 1));
        const ConstantType correctedSquaredGradient =
            adam->decayingStepAverageSquared[steppingParameter] / (utility::one<ConstantType>() - utility::pow(adam->squaredAverageDecay, stepNum + 1));

        const ConstantType toSqrt = correctedSquaredGradient;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (adam->learningRate / (sqrtResult + precisionAsConstant)) * correctedGradient;
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        // You can compare this with the RAdam paper's "Algorithm 2: Rectified Adam".
        // The line numbers and comments are matched.
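        // (The RAdam paper: Liu et al., "On the Variance of the Adaptive Learning Rate and Beyond", ICLR 2020.)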
        // Initializing / Compute Gradient: Already happened.
        // 2: Compute maximum length of approximated simple moving average
        const ConstantType maxLengthApproxSMA = 2 / (utility::one<ConstantType>() - radam->squaredAverageDecay) - utility::one<ConstantType>();

        // 5: Update exponential moving 2nd moment
        radam->decayingStepAverageSquared[steppingParameter] = radam->squaredAverageDecay * radam->decayingStepAverageSquared[steppingParameter] +
                                                               (utility::one<ConstantType>() - radam->squaredAverageDecay) * utility::pow(projectedGradient, 2);
        // 6: Update exponential moving 1st moment
        radam->decayingStepAverage[steppingParameter] =
            radam->averageDecay * radam->decayingStepAverage[steppingParameter] + (utility::one<ConstantType>() - radam->averageDecay) * projectedGradient;
        // 7: Compute bias corrected moving average
        const ConstantType biasCorrectedMovingAverage =
            radam->decayingStepAverage[steppingParameter] / (utility::one<ConstantType>() - utility::pow(radam->averageDecay, stepNum + 1));
        const ConstantType squaredAverageDecayPow = utility::pow(radam->squaredAverageDecay, stepNum + 1);
        // 8: Compute the length of the approximated simple moving average
        const ConstantType lengthApproxSMA =
            maxLengthApproxSMA -
            ((2 * (utility::convertNumber<ConstantType>(stepNum) + utility::one<ConstantType>()) * squaredAverageDecayPow) / (1 - squaredAverageDecayPow));
        // 9: If the variance is tractable, i.e. lengthApproxSMA > 4, then
        if (lengthApproxSMA > 4) {
            // 10: Compute adaptive learning rate
            const ConstantType adaptiveLearningRate =
                constantTypeSqrt((utility::one<ConstantType>() - squaredAverageDecayPow) / radam->decayingStepAverageSquared[steppingParameter]);
            // 11: Compute the variance rectification term
            const ConstantType varianceRectification =
                constantTypeSqrt(((lengthApproxSMA - 4) / (maxLengthApproxSMA - 4)) * ((lengthApproxSMA - 2) / (maxLengthApproxSMA - 2)) *
                                 ((maxLengthApproxSMA) / (lengthApproxSMA)));
            // 12: Update parameters with adaptive momentum
            step = radam->learningRate * varianceRectification * biasCorrectedMovingAverage * adaptiveLearningRate;
        } else {
            // 14: Update parameters with un-adapted momentum
            step = radam->learningRate * biasCorrectedMovingAverage;
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
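        // RMSProp: keep an exponentially decaying average of squared gradients,
        // E[g^2]_t = gamma * E[g^2]_{t-1} + (1 - gamma) * g_t^2, and step by lr * g / sqrt(E[g^2]_t + eps).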
        rmsProp->rootMeanSquare[steppingParameter] = rmsProp->averageDecay * rmsProp->rootMeanSquare[steppingParameter] +
                                                     (utility::one<ConstantType>() - rmsProp->averageDecay) * projectedGradient * projectedGradient;

        const ConstantType toSqrt = rmsProp->rootMeanSquare[steppingParameter] + precisionAsConstant;
        ConstantType sqrtResult = constantTypeSqrt(toSqrt);

        step = (rmsProp->learningRate / sqrtResult) * projectedGradient;
    } else if (Plain* plain = boost::get<Plain>(&gradientDescentType)) {
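        // If useSignsOnly is set, the gradient's magnitude is ignored and we step by a fixed
        // learning rate in the gradient's direction (sign gradient descent); the same switch
        // applies to the Momentum and Nesterov variants below.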
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -plain->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = plain->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = plain->learningRate * projectedGradient;
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -momentum->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = momentum->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = momentum->learningRate * projectedGradient;
        }
        step += momentum->momentumTerm * momentum->pastStep.at(steppingParameter);
        momentum->pastStep[steppingParameter] = step;
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
        if (useSignsOnly) {
            if (projectedGradient < utility::zero<ConstantType>()) {
                step = -nesterov->learningRate;
            } else if (projectedGradient > utility::zero<ConstantType>()) {
                step = nesterov->learningRate;
            } else {
                step = utility::zero<ConstantType>();
            }
        } else {
            step = nesterov->learningRate * projectedGradient;
        }
        step += nesterov->momentumTerm * nesterov->pastStep.at(steppingParameter);
        nesterov->pastStep[steppingParameter] = step;
    } else {
        STORM_LOG_ERROR("Unknown GradientDescentType.");
    }

    const CoefficientType<FunctionType> convertedStep = utility::convertNumber<CoefficientType<FunctionType>>(step);
    const CoefficientType<FunctionType> newPos = position[steppingParameter] + convertedStep;
    position[steppingParameter] = newPos;
    // Map parameter back to (0, 1).
    if (constraintMethod == GradientDescentConstraintMethod::PROJECT || constraintMethod == GradientDescentConstraintMethod::PROJECT_WITH_GRADIENT) {
        position[steppingParameter] = utility::max(precision, position[steppingParameter]);
        CoefficientType<FunctionType> const upperBound = utility::one<CoefficientType<FunctionType>>() - precision;
        position[steppingParameter] = utility::min(upperBound, position[steppingParameter]);
    }
    return utility::abs<ConstantType>(oldPosAsConstant - utility::convertNumber<ConstantType>(position[steppingParameter]));
}

template<typename FunctionType, typename ConstantType>
ConstantType GradientDescentInstantiationSearcher<FunctionType, ConstantType>::stochasticGradientDescent(
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>& position) {
    uint_fast64_t initialStateModel = model.getStates("init").getNextSetIndex(0);

    ConstantType currentValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            currentValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            currentValue = utility::infinity<ConstantType>();
            break;
    }

    // We count the number of iterations where the value changes less than the threshold, and terminate if it is large enough.
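    // ("Large enough" means: more than one full pass over all parameters with only tiny changes; see the check below.)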
    uint64_t tinyChangeIterations = 0;

    std::map<VariableType<FunctionType>, ConstantType> deltaVector;

    std::vector<VariableType<FunctionType>> parameterEnumeration;
    for (auto parameter : this->parameters) {
        parameterEnumeration.push_back(parameter);
    }

    utility::Stopwatch printUpdateStopwatch;
    printUpdateStopwatch.start();

    // The index to keep track of what parameter(s) to consider next.
    // The "mini-batch", i.e. the parameters to consider, is parameterNum..parameterNum+miniBatchSize-1
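    // (Example: with 5 parameters and miniBatchSize 2, the batches are {0,1}, {2,3}, {4}, then {0,1} again.)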
    uint_fast64_t parameterNum = 0;
    for (uint_fast64_t stepNum = 0; true; ++stepNum) {
        if (printUpdateStopwatch.getTimeInSeconds() >= 15) {
            printUpdateStopwatch.restart();
            STORM_PRINT_AND_LOG("Currently at " << currentValue << "\n");
        }

        std::vector<VariableType<FunctionType>> miniBatch;
        for (uint_fast64_t i = parameterNum; i < std::min((uint_fast64_t)parameterEnumeration.size(), parameterNum + miniBatchSize); i++) {
            miniBatch.push_back(parameterEnumeration[i]);
        }
        ConstantType oldValue = currentValue;
        CoefficientType<FunctionType> const precision = storm::utility::convertNumber<CoefficientType<FunctionType>>(
            storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision());

        // If nesterov is enabled, we need to compute the gradient on the predicted position
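        // (Nesterov look-ahead: the gradient is evaluated at position + momentumTerm * pastStep,
        // clamped to [precision, 1 - precision], rather than at the current position itself.)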
        std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> nesterovPredictedPosition(position);
        if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
            CoefficientType<FunctionType> const upperBound = (utility::one<CoefficientType<FunctionType>>() - precision);
            for (auto const& parameter : miniBatch) {
                ConstantType const addedTerm = nesterov->momentumTerm * nesterov->pastStep[parameter];
                nesterovPredictedPosition[parameter] += storm::utility::convertNumber<CoefficientType<FunctionType>>(addedTerm);
                nesterovPredictedPosition[parameter] = utility::max(precision, nesterovPredictedPosition[parameter]);
                nesterovPredictedPosition[parameter] = utility::min(upperBound, nesterovPredictedPosition[parameter]);
            }
        }
        if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
            // Apply sigmoid function
            for (auto const& parameter : parameters) {
                nesterovPredictedPosition[parameter] =
                    utility::one<CoefficientType<FunctionType>>() /
                    (utility::one<CoefficientType<FunctionType>>() +
                     utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(nesterovPredictedPosition[parameter]))));
            }
        }

        // Compute the value of our position and terminate if it satisfies the bound or is
        // zero or one when computing probabilities. The "valueVector" (just the probability/expected
        // reward for eventually reaching the target from every state) is also used for computing
        // the gradient later. We only need one computation of the "valueVector" per mini-batch.
        //
        // If nesterov is activated, we need to do this twice. First, to check the value of the current position.
        // Second, to compute the valueVector at the nesterovPredictedPosition.
        // If nesterov is deactivated, then nesterovPredictedPosition == position.

        // Are we at a stochastic (in-bounds) position?
        bool stochasticPosition = true;
        for (auto const& parameter : parameters) {
            if (nesterovPredictedPosition[parameter] < 0 + precision || nesterovPredictedPosition[parameter] > 1 - precision) {
                stochasticPosition = false;
                break;
            }
        }

        bool computeValue = true;
        if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC || constraintMethod == GradientDescentConstraintMethod::BARRIER_INFINITY) {
            if (!stochasticPosition) {
                computeValue = false;
            }
        }

        if (computeValue) {
            std::unique_ptr<storm::modelchecker::CheckResult> intermediateResult = instantiationModelChecker->check(env, nesterovPredictedPosition);
            std::vector<ConstantType> valueVector = intermediateResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
            if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
                std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> modelCheckPosition(position);
                if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
                    for (auto const& parameter : parameters) {
                        modelCheckPosition[parameter] =
                            utility::one<CoefficientType<FunctionType>>() /
                            (utility::one<CoefficientType<FunctionType>>() +
                             utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(modelCheckPosition[parameter]))));
                    }
                }
                std::unique_ptr<storm::modelchecker::CheckResult> terminationResult = instantiationModelChecker->check(env, modelCheckPosition);
                std::vector<ConstantType> terminationValueVector = terminationResult->asExplicitQuantitativeCheckResult<ConstantType>().getValueVector();
                currentValue = terminationValueVector[initialStateModel];
            } else {
                currentValue = valueVector[initialStateModel];
            }

            if (synthesisTask->getBound().isSatisfied(currentValue) && stochasticPosition) {
                break;
            }

            for (auto const& parameter : miniBatch) {
                auto checkResult = derivativeEvaluationHelper->check(env, nesterovPredictedPosition, parameter, valueVector);
                ConstantType delta = checkResult->getValueVector()[derivativeEvaluationHelper->getInitialState()];
                if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                    synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                    delta = -delta;
                }
                deltaVector[parameter] = delta;
            }
        } else {
            if (synthesisTask->getBound().comparisonType == logic::ComparisonType::Less ||
                synthesisTask->getBound().comparisonType == logic::ComparisonType::LessEqual) {
                currentValue = utility::infinity<ConstantType>();
            } else {
                currentValue = -utility::infinity<ConstantType>();
            }
        }

        // Log position and probability information for later use in visualizing the descent, if desired.
        if (recordRun) {
            VisualizationPoint point;
            point.position = nesterovPredictedPosition;
            point.value = currentValue;
            walk.push_back(point);
        }

        // Perform the step. The actualChange is the change in position that the step caused, which differs from the
        // delta in two ways: First, the delta is scaled by the learning rate (and possibly momentum terms). Second, if
        // the current position is at the epsilon boundary and the delta would step out of the constrained region, the
        // position is corrected; the actualChange is then the change from the last to the corrected position (and may
        // be zero while the delta is not).
        for (auto const& parameter : miniBatch) {
            doStep(parameter, position, deltaVector, stepNum);
        }

        if (storm::utility::abs<ConstantType>(oldValue - currentValue) < terminationEpsilon) {
            tinyChangeIterations += miniBatch.size();
            if (tinyChangeIterations > parameterEnumeration.size()) {
                break;
            }
        } else {
            tinyChangeIterations = 0;
        }

        // Consider the next parameter
        parameterNum = parameterNum + miniBatchSize;
        if (parameterNum >= parameterEnumeration.size()) {
            parameterNum = 0;
        }

        if (storm::utility::resources::isTerminate()) {
            STORM_LOG_WARN("Aborting Gradient Descent, returning non-optimal value.");
            break;
        }
    }
    return currentValue;
}

template<typename FunctionType, typename ConstantType>
std::pair<std::map<VariableType<FunctionType>, CoefficientType<FunctionType>>, ConstantType>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::gradientDescent() {
    STORM_LOG_ASSERT(this->synthesisTask, "Call setup before calling gradientDescent");
384
385 resetDynamicValues();
386
387 STORM_LOG_ASSERT(this->synthesisTask->isBoundSet(), "Task does not involve a bound.");
388
389 std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> bestInstantiation;
390 ConstantType bestValue;
    switch (this->synthesisTask->getBound().comparisonType) {
        case logic::ComparisonType::Greater:
        case logic::ComparisonType::GreaterEqual:
            bestValue = -utility::infinity<ConstantType>();
            break;
        case logic::ComparisonType::Less:
        case logic::ComparisonType::LessEqual:
            bestValue = utility::infinity<ConstantType>();
            break;
    }

    std::random_device device;
    std::default_random_engine engine(device());
    std::uniform_real_distribution<> dist(0, 1);
    bool initialGuess = true;
    std::map<VariableType<FunctionType>, CoefficientType<FunctionType>> point;
    while (true) {
        STORM_PRINT_AND_LOG("Trying out a new starting point\n");
        if (initialGuess) {
            STORM_PRINT_AND_LOG("Trying initial guess (p->0.5 for every parameter p or set start point)\n");
        }
        // Generate random starting point
        for (auto const& param : this->parameters) {
            if (initialGuess) {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                if (startPoint) {
                    point[param] = (*startPoint)[param];
                } else {
                    point[param] = utility::convertNumber<CoefficientType<FunctionType>>(0.5 + 1e-6);
                }
            } else if (!initialGuess && constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC &&
                       logarithmicBarrierTerm > utility::convertNumber<ConstantType>(0.00001)) {
                // Do nothing
            } else {
                logarithmicBarrierTerm = utility::convertNumber<ConstantType>(0.1);
                point[param] = utility::convertNumber<CoefficientType<FunctionType>>(dist(engine));
            }
        }
        initialGuess = false;

        /* walk.clear(); */

        stochasticWatch.start();
        STORM_PRINT_AND_LOG("Starting at " << point << "\n");
        ConstantType prob = stochasticGradientDescent(point);
        stochasticWatch.stop();

        bool isFoundPointBetter = false;
        switch (this->synthesisTask->getBound().comparisonType) {
            case logic::ComparisonType::Greater:
            case logic::ComparisonType::GreaterEqual:
                isFoundPointBetter = prob > bestValue;
                break;
            case logic::ComparisonType::Less:
            case logic::ComparisonType::LessEqual:
                isFoundPointBetter = prob < bestValue;
                break;
        }
        if (isFoundPointBetter) {
            bestInstantiation = point;
            bestValue = prob;
        }

        if (synthesisTask->getBound().isSatisfied(bestValue)) {
            STORM_PRINT_AND_LOG("Aborting because the bound is satisfied\n");
            break;
        } else if (storm::utility::resources::isTerminate()) {
            break;
        } else {
            if (constraintMethod == GradientDescentConstraintMethod::BARRIER_LOGARITHMIC &&
                logarithmicBarrierTerm > utility::convertNumber<ConstantType>(0.00001)) {
                logarithmicBarrierTerm = logarithmicBarrierTerm / 10;
                STORM_PRINT_AND_LOG("Smaller term\n" << bestValue << "\n" << logarithmicBarrierTerm << "\n");
                continue;
            }
            STORM_PRINT_AND_LOG("Sorry, couldn't satisfy the bound (yet). Best found value so far: " << bestValue << "\n");
            continue;
        }
    }

    if (constraintMethod == GradientDescentConstraintMethod::LOGISTIC_SIGMOID) {
        // Apply sigmoid function
        for (auto const& parameter : parameters) {
            bestInstantiation[parameter] =
                utility::one<CoefficientType<FunctionType>>() /
                (utility::one<CoefficientType<FunctionType>>() +
                 utility::convertNumber<CoefficientType<FunctionType>>(std::exp(-utility::convertNumber<double>(bestInstantiation[parameter]))));
        }
    }
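    // (Under LOGISTIC_SIGMOID the search runs over unconstrained real-valued positions; only here,
    // and for model checking above, are they mapped through the sigmoid into (0, 1).)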

    return std::make_pair(bestInstantiation, bestValue);
}

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::resetDynamicValues() {
    if (Adam* adam = boost::get<Adam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            adam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            adam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RAdam* radam = boost::get<RAdam>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            radam->decayingStepAverage[parameter] = utility::zero<ConstantType>();
            radam->decayingStepAverageSquared[parameter] = utility::zero<ConstantType>();
        }
    } else if (RmsProp* rmsProp = boost::get<RmsProp>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            rmsProp->rootMeanSquare[parameter] = utility::zero<ConstantType>();
        }
    } else if (Momentum* momentum = boost::get<Momentum>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            momentum->pastStep[parameter] = utility::zero<ConstantType>();
        }
    } else if (Nesterov* nesterov = boost::get<Nesterov>(&gradientDescentType)) {
        for (auto const& parameter : this->parameters) {
            nesterov->pastStep[parameter] = utility::zero<ConstantType>();
        }
    }
}

template<typename FunctionType, typename ConstantType>
void GradientDescentInstantiationSearcher<FunctionType, ConstantType>::printRunAsJson() {
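    // Writes the recorded walk as a JSON array, e.g. [{"p":0.25,"value":0.5},{"p":0.3,"value":0.55}],
    // followed by the value at the last step on its own line.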
    STORM_PRINT("[");
    for (auto s = walk.begin(); s != walk.end(); ++s) {
        STORM_PRINT("{");
        auto point = s->position;
        for (auto iter = point.begin(); iter != point.end(); ++iter) {
            STORM_PRINT("\"" << iter->first.name() << "\"");
            STORM_PRINT(":" << utility::convertNumber<double>(iter->second) << ",");
        }
        STORM_PRINT("\"value\":" << s->value << "}");
        if (std::next(s) != walk.end()) {
            STORM_PRINT(",");
        }
    }
    STORM_PRINT("]\n");
    // Print value at last step for data collection
    STORM_PRINT(storm::utility::convertNumber<double>(walk.at(walk.size() - 1).value) << "\n");
}

template<typename FunctionType, typename ConstantType>
std::vector<typename GradientDescentInstantiationSearcher<FunctionType, ConstantType>::VisualizationPoint>
GradientDescentInstantiationSearcher<FunctionType, ConstantType>::getVisualizationWalk() {
    return walk;
}

} // namespace derivative
} // namespace storm