template<typename Real>
void AugmentedLagrangianAlgorithm<Real>::initialize( Vector<Real> &x, const Vector<Real> &g,
                                                     const Vector<Real> &l, const Vector<Real> &c,
                                                     AugmentedLagrangianObjective<Real> &alobj,
                                                     BoundConstraint<Real> &bnd, Constraint<Real> &con,
                                                     std::ostream &outStream ) {
  // Construct a default polyhedral projection onto the bound constraint if none was supplied
  if (proj_ == nullPtr) {
    proj_ = makePtr<PolyhedralProjection<Real>>(makePtrFromRef(bnd));
    hasPolyProj_ = false;
  }
  proj_->project(x,outStream);
  // ...
  const Real one(1), TOL(1.e-2);
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  // ...
  alobj.gradient(*state_->gradientVec,x,tol);
  // ...
  state_->cnorm = state_->constraintVec->norm();
  // Estimate objective and constraint scaling factors
  if (useDefaultScaling_) {
    // ...
    try {
      // Scale the constraint by the reciprocal of an estimate of the largest row norm of its Jacobian
      Ptr<Vector<Real>> ji = x.clone();
      Real maxji(0), normji(0);
      for (int i = 0; i < c.dimension(); ++i) {
        // ...
        maxji = std::max(normji,maxji);
      }
      cscale_ = one/std::max(one,maxji);
    }
    catch (std::exception &e) {
      // ...
    }
  }
  // ...
  // Criticality measure: gnorm = || x - proj( x - gradL/min(fscale_,cscale_) ) ||
  x.axpy(-one/std::min(fscale_,cscale_),state_->gradientVec->dual());
  proj_->project(x,outStream);
  x.axpy(-one,*state_->iterateVec);
  state_->gnorm = x.norm();
  x.set(*state_->iterateVec);
  // Default initial penalty parameter based on the scaled objective value and constraint violation
  if (useDefaultInitPen_) {
    const Real oem8(1e-8), oem2(1e-2), two(2), ten(10);
    state_->searchSize = std::max(oem8,
      std::min(ten*std::max(one,std::abs(fscale_*state_->value))
               / std::max(one,std::pow(cscale_*state_->cnorm,two)),oem2*maxPenaltyParam_));
  }
  // Set intermediate (subproblem) stopping tolerances
  if (useRelTol_) outerOptTolerance_ *= state_->gnorm;
  minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
  optTolerance_  = std::max<Real>(TOL*outerOptTolerance_,
                                  optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
  optTolerance_  = std::min<Real>(optTolerance_,TOL*state_->gnorm);
  feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
                                  feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
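  // The intermediate tolerances above follow the classical augmented Lagrangian
  // scheme (cf. LANCELOT and Conn-Gould-Toint): with r = min(1/penalty, minPenaltyLowerBound_),
  // the subproblem optimality tolerance starts near optToleranceInitial_*r^optDecreaseExponent_
  // and the feasibility tolerance near feasToleranceInitial_*r^feasDecreaseExponent_, each
  // floored by a fraction TOL of the corresponding outer tolerance and, for optimality,
  // additionally capped by TOL*gnorm.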
  // Reset the augmented Lagrangian with the initial multiplier and penalty parameter
  alobj.reset(l,state_->searchSize);
  // ...
  if (verbosity_ > 1) {
    outStream << std::endl;
    outStream << "Augmented Lagrangian Initialize" << std::endl;
    outStream << "Objective Scaling: " << fscale_ << std::endl;
    outStream << "Constraint Scaling: " << cscale_ << std::endl;
    outStream << "Penalty Parameter: " << state_->searchSize << std::endl;
    outStream << std::endl;
  }
}
template<typename Real>
void AugmentedLagrangianAlgorithm<Real>::run( Vector<Real> &x, const Vector<Real> &g,
                                              Objective<Real> &obj, BoundConstraint<Real> &bnd,
                                              Constraint<Real> &econ, Vector<Real> &emul,
                                              const Vector<Real> &eres, std::ostream &outStream ) {
  const Real one(1), oem2(1e-2);
  Real tol(std::sqrt(ROL_EPSILON<Real>()));
  // Wrap the objective and equality constraint in an augmented Lagrangian objective
  AugmentedLagrangianObjective<Real> alobj(makePtrFromRef(obj),makePtrFromRef(econ),
                                           state_->searchSize,g,eres,emul,
                                           scaleLagrangian_,HessianApprox_);
  initialize(x,g,emul,eres,alobj,bnd,econ,outStream);
  Ptr<TypeB::Algorithm<Real>> algo;
  if (verbosity_ > 0) writeOutput(outStream,true);
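  // Outer augmented Lagrangian loop: each pass solves a bound-constrained subproblem
  // in alobj with a TypeB algorithm, then either updates the multiplier estimate or
  // increases the penalty parameter, and adjusts the intermediate tolerances.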
  while (status_->check(*state_)) {
    // Solve the bound-constrained augmented Lagrangian subproblem to the current tolerances
    list_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
    list_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
    algo = TypeB::AlgorithmFactory<Real>(list_);
    if (hasPolyProj_) algo->run(x,g,alobj,bnd,*proj_->getLinearConstraint(),
                                *proj_->getMultiplier(),*proj_->getResidual(),
                                outStream);
    else              algo->run(x,g,alobj,bnd,outStream);
    subproblemIter_ = algo->getState()->iter;
    // ...
    // Compute step length
    state_->stepVec->set(x);
    state_->stepVec->axpy(-one,*state_->iterateVec);
    state_->snorm = state_->stepVec->norm();
    // ...
    state_->iterateVec->set(x);
    // ...
    // Update the constraint norm and the gradient of the augmented Lagrangian
    state_->cnorm = state_->constraintVec->norm();
    alobj.gradient(*state_->gradientVec,x,tol);
    if (scaleLagrangian_) {
      state_->gradientVec->scale(state_->searchSize);
    }
    // Criticality measure: gnorm = || x - proj( x - gradL/min(fscale_,cscale_) ) ||
    x.axpy(-one/std::min(fscale_,cscale_),state_->gradientVec->dual());
    proj_->project(x,outStream);
    x.axpy(-one,*state_->iterateVec);
    state_->gnorm = x.norm();
    x.set(*state_->iterateVec);
    // ...
    minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
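    // Classical augmented Lagrangian decision: if the scaled constraint violation meets
    // the current feasibility tolerance, apply the first-order multiplier update
    // emul += penalty*cscale_*c(x) and tighten both tolerances; otherwise increase the
    // penalty and reset the tolerances from their initial values.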
    if ( cscale_*state_->cnorm < feasTolerance_ ) {
      emul.axpy(state_->searchSize*cscale_,state_->constraintVec->dual());
      optTolerance_  = std::max(oem2*outerOptTolerance_,
                                optTolerance_*std::pow(minPenaltyReciprocal_,optIncreaseExponent_));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                                feasTolerance_*std::pow(minPenaltyReciprocal_,feasIncreaseExponent_));
      // Account for the multiplier update in the reported step size
      state_->snorm += state_->searchSize*cscale_*state_->cnorm;
      state_->lagmultVec->set(emul);
    }
    else {
      state_->searchSize = std::min(penaltyUpdate_*state_->searchSize,maxPenaltyParam_);
      optTolerance_  = std::max(oem2*outerOptTolerance_,
                                optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                                feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    }
    // Reset the augmented Lagrangian with the updated multiplier and penalty parameter
    alobj.reset(emul,state_->searchSize);
    // ...
    if (verbosity_ > 0) writeOutput(outStream,printHeader_);
  }
  // ...
}
template<typename Real>
void AugmentedLagrangianAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::stringstream hist;
  // ...
  hist << std::string(114,'-') << std::endl;
  hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
  hist << "  iter    - Number of iterates (steps taken)" << std::endl;
  hist << "  fval    - Objective function value" << std::endl;
  hist << "  cnorm   - Norm of the constraint violation" << std::endl;
  hist << "  gLnorm  - Norm of the gradient of the Lagrangian" << std::endl;
  hist << "  snorm   - Norm of the step" << std::endl;
  hist << "  penalty - Penalty parameter" << std::endl;
  hist << "  feasTol - Feasibility tolerance" << std::endl;
  hist << "  optTol  - Optimality tolerance" << std::endl;
  hist << "  #fval   - Number of times the objective was computed" << std::endl;
  hist << "  #grad   - Number of times the gradient was computed" << std::endl;
  hist << "  #cval   - Number of times the constraint was computed" << std::endl;
  hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
  hist << std::string(114,'-') << std::endl;
  // ...
  // Column labels for the iteration table
  hist << std::setw(6)  << std::left << "iter";
  hist << std::setw(15) << std::left << "fval";
  hist << std::setw(15) << std::left << "cnorm";
  hist << std::setw(15) << std::left << "gLnorm";
  hist << std::setw(15) << std::left << "snorm";
  hist << std::setw(10) << std::left << "penalty";
  hist << std::setw(10) << std::left << "feasTol";
  hist << std::setw(10) << std::left << "optTol";
  hist << std::setw(8)  << std::left << "#fval";
  hist << std::setw(8)  << std::left << "#grad";
  hist << std::setw(8)  << std::left << "#cval";
  hist << std::setw(8)  << std::left << "subIter";
  // ...
  os << hist.str();
}
template<typename Real>
void AugmentedLagrangianAlgorithm<Real>::writeOutput( std::ostream& os, const bool print_header ) const {
  std::stringstream hist;
  hist << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( print_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    // Initial iteration: no step or subproblem information is available yet
    // ...
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->cnorm;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << "---";
    hist << std::scientific << std::setprecision(2);
    hist << std::setw(10) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
    hist << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
    hist << std::scientific << std::setprecision(6);
    hist << std::setw(8)  << std::left << state_->nfval;
    hist << std::setw(8)  << std::left << state_->ngrad;
    hist << std::setw(8)  << std::left << state_->ncval;
    hist << std::setw(8)  << std::left << "---";
    // ...
  }
  else {
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->cnorm;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << state_->snorm;
    hist << std::scientific << std::setprecision(2);
    hist << std::setw(10) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << feasTolerance_;
    hist << std::setw(10) << std::left << optTolerance_;
    hist << std::scientific << std::setprecision(6);
    hist << std::setw(8)  << std::left << state_->nfval;
    hist << std::setw(8)  << std::left << state_->ngrad;
    hist << std::setw(8)  << std::left << state_->ncval;
    hist << std::setw(8)  << std::left << subproblemIter_;
    // ...
  }
  os << hist.str();
}
void run( Vector<Real> &x, const Vector<Real> &g, Objective<Real> &obj, BoundConstraint<Real> &bnd,
          Constraint<Real> &econ, Vector<Real> &emul, const Vector<Real> &eres,
          std::ostream &outStream = std::cout ) override
Run algorithm on general constrained problems (Type-G). This is the primary Type-G interface.
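For context, a minimal driver sketch is shown below. The QuadObjective and SumConstraint classes are hypothetical stand-ins built on ROL::StdObjective and ROL::StdConstraint; the header names, parameter-list keys, bound values, and the AugmentedLagrangianAlgorithm(ParameterList&) constructor follow common ROL usage and should be checked against the ROL version in use. It calls the run overload documented above.

#include <iostream>
#include <vector>
#include "ROL_StdVector.hpp"
#include "ROL_StdObjective.hpp"
#include "ROL_StdConstraint.hpp"
#include "ROL_Bounds.hpp"
#include "ROL_ParameterList.hpp"
#include "ROL_TypeG_AugmentedLagrangianAlgorithm.hpp"

using RealT = double;

// Hypothetical objective: f(x) = 0.5*||x||^2
struct QuadObjective : public ROL::StdObjective<RealT> {
  RealT value( const std::vector<RealT> &x, RealT &tol ) override {
    RealT v(0);
    for (RealT xi : x) v += static_cast<RealT>(0.5)*xi*xi;
    return v;
  }
  void gradient( std::vector<RealT> &g, const std::vector<RealT> &x, RealT &tol ) override {
    g = x;
  }
};

// Hypothetical equality constraint: c(x) = sum_i x_i - 1 = 0
struct SumConstraint : public ROL::StdConstraint<RealT> {
  void value( std::vector<RealT> &c, const std::vector<RealT> &x, RealT &tol ) override {
    c[0] = static_cast<RealT>(-1);
    for (RealT xi : x) c[0] += xi;
  }
};

int main() {
  const int n = 5;
  // Primal iterate, dual (gradient) vector, multiplier, and constraint residual storage
  ROL::StdVector<RealT> x(ROL::makePtr<std::vector<RealT>>(n, 0.5));
  ROL::StdVector<RealT> g(ROL::makePtr<std::vector<RealT>>(n, 0.0));
  ROL::StdVector<RealT> emul(ROL::makePtr<std::vector<RealT>>(1, 0.0));
  ROL::StdVector<RealT> eres(ROL::makePtr<std::vector<RealT>>(1, 0.0));

  // Box bounds 0 <= x <= 10
  ROL::Bounds<RealT> bnd(ROL::makePtr<ROL::StdVector<RealT>>(ROL::makePtr<std::vector<RealT>>(n, 0.0)),
                         ROL::makePtr<ROL::StdVector<RealT>>(ROL::makePtr<std::vector<RealT>>(n, 10.0)));

  QuadObjective obj;
  SumConstraint econ;

  // Outer stopping criteria read by the algorithm's status test (key names may vary by version)
  ROL::ParameterList list;
  list.sublist("Status Test").set("Gradient Tolerance",   1.e-8);
  list.sublist("Status Test").set("Constraint Tolerance", 1.e-8);
  list.sublist("Status Test").set("Iteration Limit",      100);

  ROL::TypeG::AugmentedLagrangianAlgorithm<RealT> algo(list);
  algo.run(x, g, obj, bnd, econ, emul, eres, std::cout);  // primary Type-G interface shown above
  return 0;
}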