/************************************************************************* ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). >>> SOURCE LICENSE >>> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (www.fsf.org); either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. A copy of the GNU General Public License is available at http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ #ifdef _MSC_VER #define _CRT_SECURE_NO_WARNINGS #endif #include "stdafx.h" #include "optimization.h" // disable some irrelevant warnings #if (AE_COMPILER==AE_MSVC) && !defined(AE_ALL_WARNINGS) #pragma warning(disable:4100) #pragma warning(disable:4127) #pragma warning(disable:4611) #pragma warning(disable:4702) #pragma warning(disable:4996) #endif ///////////////////////////////////////////////////////////////////////// // // THIS SECTION CONTAINS IMPLEMENTATION OF C++ INTERFACE // ///////////////////////////////////////////////////////////////////////// namespace alglib { #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This structure is used to store OptGuard report, i.e. report on the properties of the nonlinear function being optimized with ALGLIB. 
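In a nutshell, a typical OptGuard session looks roughly as follows (an
illustrative sketch only, shown for the MinLBFGS optimizer; the activation
and retrieval functions are per-optimizer, and the names and the test step
value below are examples which should be checked against the interface of
the solver you actually use). The checks and report fields are described
in detail below.

    alglib::minlbfgsstate state;
    alglib::optguardreport ogrep;
    // ... create and configure the optimizer as usual ...
    alglib::minlbfgsoptguardgradient(state, 0.001); // verify user-supplied gradient, example test step
    alglib::minlbfgsoptguardsmoothness(state);      // monitor C0/C1 continuity during line searches
    // ... run minlbfgsoptimize() and minlbfgsresults() as usual ...
    alglib::minlbfgsoptguardresults(state, ogrep);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected || ogrep.badgradsuspected )
        printf("OptGuard is suspicious about the target function\n");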
After you tell your optimizer to activate OptGuard, this technology starts
to silently monitor function values and gradients/Jacobians being passed
all around during your optimization session. Depending on the specific set
of checks enabled, OptGuard may perform additional function evaluations
(say, about 3*N evaluations if you want to check the analytic gradient for
errors).

Upon discovering that something strange happens (function values and/or
gradient components change too sharply and/or unexpectedly) OptGuard sets
one of the "suspicion flags" (without interrupting the optimization
session).

After optimization is done, you can examine the OptGuard report. The
following report fields can be set:
* nonc0suspected
* nonc1suspected
* badgradsuspected

=== WHAT CAN BE DETECTED WITH OptGuard INTEGRITY CHECKER =================

The following types of errors in your target function (constraints) can be
caught:
a) discontinuous functions ("non-C0" part of the report)
b) functions with discontinuous derivative ("non-C1" part of the report)
c) errors in the analytic gradient provided by user

These types of errors result in the optimizer stopping well before
reaching the solution (most often - right after encountering a
discontinuity).

Type A errors are usually coding errors made during implementation of the
target function. Most "normal" problems involve continuous functions, and
anyway you can't reliably optimize a discontinuous function.

Type B errors are either coding errors or (in case the code itself is
correct) evidence of the fact that your problem is an "incorrect" one.
Most optimizers (except for the ones provided by the MINNS subpackage) do
not support nonsmooth problems.

Type C errors are coding errors which often prevent the optimizer from
making even one step, or result in optimization stopping too early, as
soon as the actual descent direction becomes too different from the one
suggested by the user-supplied gradient.

=== WHAT IS REPORTED =====================================================

The following set of report fields deals with discontinuous target
functions, ones not belonging to the C0 continuity class:
* nonc0suspected - is a flag which is set upon discovering some indication
  of the discontinuity. If this flag is false, the rest of the "non-C0"
  fields should be ignored
* nonc0fidx - is an index of the function (0 for target function, 1 or
  higher for nonlinear constraints) which is suspected of being "non-C0"
* nonc0lipschitzc - a Lipschitz constant for a function which was
  suspected of being discontinuous.
* nonc0test0positive - set to indicate the specific test which detected
  the continuity violation (test #0)

The following set of report fields deals with a discontinuous
gradient/Jacobian, i.e. with functions violating C1 continuity:
* nonc1suspected - is a flag which is set upon discovering some indication
  of the discontinuity. If this flag is false, the rest of the "non-C1"
  fields should be ignored
* nonc1fidx - is an index of the function (0 for target function, 1 or
  higher for nonlinear constraints) which is suspected of being "non-C1"
* nonc1lipschitzc - a Lipschitz constant for a function gradient which was
  suspected of being non-smooth.
* nonc1test0positive - set to indicate the specific test which detected
  the continuity violation (test #0)
* nonc1test1positive - set to indicate the specific test which detected
  the continuity violation (test #1)

The following set of report fields deals with errors in the gradient:
* badgradsuspected - is a flag which is set upon discovering an error in
  the analytic gradient supplied by user
* badgradfidx - index of the function with bad gradient (0 for target
  function, 1 or higher for nonlinear constraints)
* badgradvidx - index of the variable
* badgradxbase - location where Jacobian is tested
* the following matrices store the user-supplied Jacobian and its
  numerical differentiation version (which is assumed to be free from
  coding errors), both of them computed near the initial point:
  * badgraduser, an array[K,N], analytic Jacobian supplied by user
  * badgradnum, an array[K,N], numeric Jacobian computed by ALGLIB
  Here K is the total number of nonlinear functions (target + nonlinear
  constraints) and N is the number of variables. The element of
  badgraduser[] with index [badgradfidx,badgradvidx] is assumed to be
  wrong.

A more detailed error log can be obtained from the optimizer by explicitly
requesting reports for tests C0.0, C1.0, C1.1.

  -- ALGLIB --
     Copyright 19.11.2018 by Bochkanov Sergey
*************************************************************************/
_optguardreport_owner::_optguardreport_owner()
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
        if( p_struct!=NULL )
        {
            alglib_impl::_optguardreport_destroy(p_struct);
            alglib_impl::ae_free(p_struct);
        }
        p_struct = NULL;
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    p_struct = NULL;
    p_struct = (alglib_impl::optguardreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardreport), &_state);
    memset(p_struct, 0, sizeof(alglib_impl::optguardreport));
    alglib_impl::_optguardreport_init(p_struct, &_state, ae_false);
    ae_state_clear(&_state);
}

_optguardreport_owner::_optguardreport_owner(const _optguardreport_owner &rhs)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
        if( p_struct!=NULL )
        {
            alglib_impl::_optguardreport_destroy(p_struct);
            alglib_impl::ae_free(p_struct);
        }
        p_struct = NULL;
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    p_struct = NULL;
    alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardreport copy constructor failure (source is not initialized)", &_state);
    p_struct = (alglib_impl::optguardreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardreport), &_state);
    memset(p_struct, 0, sizeof(alglib_impl::optguardreport));
    alglib_impl::_optguardreport_init_copy(p_struct, const_cast<alglib_impl::optguardreport*>(rhs.p_struct), &_state, ae_false);
    ae_state_clear(&_state);
}

_optguardreport_owner& _optguardreport_owner::operator=(const _optguardreport_owner &rhs)
{
    if( this==&rhs )
        return *this;
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return *this;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    alglib_impl::ae_assert(p_struct!=NULL,
"ALGLIB: optguardreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_optguardreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::optguardreport)); alglib_impl::_optguardreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _optguardreport_owner::~_optguardreport_owner() { if( p_struct!=NULL ) { alglib_impl::_optguardreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::optguardreport* _optguardreport_owner::c_ptr() { return p_struct; } alglib_impl::optguardreport* _optguardreport_owner::c_ptr() const { return const_cast(p_struct); } optguardreport::optguardreport() : _optguardreport_owner() ,nonc0suspected(p_struct->nonc0suspected),nonc0test0positive(p_struct->nonc0test0positive),nonc0fidx(p_struct->nonc0fidx),nonc0lipschitzc(p_struct->nonc0lipschitzc),nonc1suspected(p_struct->nonc1suspected),nonc1test0positive(p_struct->nonc1test0positive),nonc1test1positive(p_struct->nonc1test1positive),nonc1fidx(p_struct->nonc1fidx),nonc1lipschitzc(p_struct->nonc1lipschitzc),badgradsuspected(p_struct->badgradsuspected),badgradfidx(p_struct->badgradfidx),badgradvidx(p_struct->badgradvidx),badgradxbase(&p_struct->badgradxbase),badgraduser(&p_struct->badgraduser),badgradnum(&p_struct->badgradnum) { } optguardreport::optguardreport(const optguardreport &rhs):_optguardreport_owner(rhs) ,nonc0suspected(p_struct->nonc0suspected),nonc0test0positive(p_struct->nonc0test0positive),nonc0fidx(p_struct->nonc0fidx),nonc0lipschitzc(p_struct->nonc0lipschitzc),nonc1suspected(p_struct->nonc1suspected),nonc1test0positive(p_struct->nonc1test0positive),nonc1test1positive(p_struct->nonc1test1positive),nonc1fidx(p_struct->nonc1fidx),nonc1lipschitzc(p_struct->nonc1lipschitzc),badgradsuspected(p_struct->badgradsuspected),badgradfidx(p_struct->badgradfidx),badgradvidx(p_struct->badgradvidx),badgradxbase(&p_struct->badgradxbase),badgraduser(&p_struct->badgraduser),badgradnum(&p_struct->badgradnum) { } optguardreport& optguardreport::operator=(const optguardreport &rhs) { if( this==&rhs ) return *this; _optguardreport_owner::operator=(rhs); return *this; } optguardreport::~optguardreport() { } /************************************************************************* This structure is used for detailed reporting about suspected C0 continuity violation. === WHAT IS TESTED ======================================================= C0 test studies function values (not gradient!) obtained during line searches and monitors estimate of the Lipschitz constant. Sudden spikes usually indicate that discontinuity was detected. === WHAT IS REPORTED ===================================================== Actually, report retrieval function returns TWO report structures: * one for most suspicious point found so far (one with highest change in the function value), so called "strongest" report * another one for most detailed line search (more function evaluations = easier to understand what's going on) which triggered test #0 criteria, so called "longest" report In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). 
* fidx - is an index of the function (0 for target function, 1 or higher
  for nonlinear constraints) which is suspected of being "non-C0"
* x0[], d[] - arrays of length N which store initial point and direction
  for line search (d[] can be normalized, but does not have to)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d.
* stpidxa, stpidxb - we suspect that the function violates C0 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).

You can plot the function values stored in the stp[] and f[] arrays and
study the behavior of your function with your own eyes, just to be sure
that the test correctly reported a C0 violation.

  -- ALGLIB --
     Copyright 19.11.2018 by Bochkanov Sergey
*************************************************************************/
_optguardnonc0report_owner::_optguardnonc0report_owner()
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
        if( p_struct!=NULL )
        {
            alglib_impl::_optguardnonc0report_destroy(p_struct);
            alglib_impl::ae_free(p_struct);
        }
        p_struct = NULL;
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    p_struct = NULL;
    p_struct = (alglib_impl::optguardnonc0report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc0report), &_state);
    memset(p_struct, 0, sizeof(alglib_impl::optguardnonc0report));
    alglib_impl::_optguardnonc0report_init(p_struct, &_state, ae_false);
    ae_state_clear(&_state);
}

_optguardnonc0report_owner::_optguardnonc0report_owner(const _optguardnonc0report_owner &rhs)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
        if( p_struct!=NULL )
        {
            alglib_impl::_optguardnonc0report_destroy(p_struct);
            alglib_impl::ae_free(p_struct);
        }
        p_struct = NULL;
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    p_struct = NULL;
    alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc0report copy constructor failure (source is not initialized)", &_state);
    p_struct = (alglib_impl::optguardnonc0report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc0report), &_state);
    memset(p_struct, 0, sizeof(alglib_impl::optguardnonc0report));
    alglib_impl::_optguardnonc0report_init_copy(p_struct, const_cast<alglib_impl::optguardnonc0report*>(rhs.p_struct), &_state, ae_false);
    ae_state_clear(&_state);
}

_optguardnonc0report_owner& _optguardnonc0report_owner::operator=(const _optguardnonc0report_owner &rhs)
{
    if( this==&rhs )
        return *this;
    jmp_buf _break_jump;
    alglib_impl::ae_state _state;

    alglib_impl::ae_state_init(&_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_state.error_msg);
        return *this;
#endif
    }
    alglib_impl::ae_state_set_break_jump(&_state, &_break_jump);
    alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: optguardnonc0report assignment constructor failure (destination is not initialized)", &_state);
    alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc0report assignment constructor failure (source is not initialized)", &_state);
    alglib_impl::_optguardnonc0report_destroy(p_struct);
    memset(p_struct, 0,
sizeof(alglib_impl::optguardnonc0report)); alglib_impl::_optguardnonc0report_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _optguardnonc0report_owner::~_optguardnonc0report_owner() { if( p_struct!=NULL ) { alglib_impl::_optguardnonc0report_destroy(p_struct); ae_free(p_struct); } } alglib_impl::optguardnonc0report* _optguardnonc0report_owner::c_ptr() { return p_struct; } alglib_impl::optguardnonc0report* _optguardnonc0report_owner::c_ptr() const { return const_cast(p_struct); } optguardnonc0report::optguardnonc0report() : _optguardnonc0report_owner() ,positive(p_struct->positive),fidx(p_struct->fidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),f(&p_struct->f),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc0report::optguardnonc0report(const optguardnonc0report &rhs):_optguardnonc0report_owner(rhs) ,positive(p_struct->positive),fidx(p_struct->fidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),f(&p_struct->f),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc0report& optguardnonc0report::operator=(const optguardnonc0report &rhs) { if( this==&rhs ) return *this; _optguardnonc0report_owner::operator=(rhs); return *this; } optguardnonc0report::~optguardnonc0report() { } /************************************************************************* This structure is used for detailed reporting about suspected C1 continuity violation as flagged by C1 test #0 (OptGuard has several tests for C1 continuity, this report is used by #0). === WHAT IS TESTED ======================================================= C1 test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of directional derivative estimate. This test is less powerful than test #1, but it does not depend on gradient values and thus it is more robust against artifacts introduced by numerical differentiation. === WHAT IS REPORTED ===================================================== Actually, report retrieval function returns TWO report structures: * one for most suspicious point found so far (one with highest change in the directional derivative), so called "strongest" report * another one for most detailed line search (more function evaluations = easier to understand what's going on) which triggered test #0 criteria, so called "longest" report In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. You can plot function values stored in stp[] and f[] arrays and study behavior of your function by your own eyes, just to be sure that test correctly reported C1 violation. 
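For illustration only, a sketch of how such reports are typically
retrieved and inspected (shown for MinLBFGS, given a previously configured
optimizer variable named state; other optimizers expose similarly named
retrieval functions, so verify the exact name for the solver you use):

    alglib::optguardnonc1test0report strrep, lngrep;
    alglib::minlbfgsoptguardnonc1test0results(state, strrep, lngrep);
    if( strrep.positive )
        for(int i=0; i<strrep.cnt; i++)
            printf("stp=%.6f f=%.6f%s\n", strrep.stp[i], strrep.f[i],
                   (i>=strrep.stpidxa && i<=strrep.stpidxb) ? "   <-- suspected interval" : "");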
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ _optguardnonc1test0report_owner::_optguardnonc1test0report_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test0report_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::optguardnonc1test0report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc1test0report), &_state); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test0report)); alglib_impl::_optguardnonc1test0report_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _optguardnonc1test0report_owner::_optguardnonc1test0report_owner(const _optguardnonc1test0report_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test0report_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc1test0report copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::optguardnonc1test0report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc1test0report), &_state); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test0report)); alglib_impl::_optguardnonc1test0report_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _optguardnonc1test0report_owner& _optguardnonc1test0report_owner::operator=(const _optguardnonc1test0report_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: optguardnonc1test0report assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc1test0report assignment constructor failure (source is not initialized)", &_state); alglib_impl::_optguardnonc1test0report_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test0report)); alglib_impl::_optguardnonc1test0report_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _optguardnonc1test0report_owner::~_optguardnonc1test0report_owner() { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test0report_destroy(p_struct); ae_free(p_struct); } } alglib_impl::optguardnonc1test0report* _optguardnonc1test0report_owner::c_ptr() { return p_struct; } alglib_impl::optguardnonc1test0report* _optguardnonc1test0report_owner::c_ptr() const { return const_cast(p_struct); } optguardnonc1test0report::optguardnonc1test0report() : _optguardnonc1test0report_owner() 
,positive(p_struct->positive),fidx(p_struct->fidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),f(&p_struct->f),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc1test0report::optguardnonc1test0report(const optguardnonc1test0report &rhs):_optguardnonc1test0report_owner(rhs) ,positive(p_struct->positive),fidx(p_struct->fidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),f(&p_struct->f),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc1test0report& optguardnonc1test0report::operator=(const optguardnonc1test0report &rhs) { if( this==&rhs ) return *this; _optguardnonc1test0report_owner::operator=(rhs); return *this; } optguardnonc1test0report::~optguardnonc1test0report() { } /************************************************************************* This structure is used for detailed reporting about suspected C1 continuity violation as flagged by C1 test #1 (OptGuard has several tests for C1 continuity, this report is used by #1). === WHAT IS TESTED ======================================================= C1 test #1 studies individual components of the gradient as recorded during line searches. Upon discovering discontinuity in the gradient this test records specific component which was suspected (or one with highest indication of discontinuity if multiple components are suspected). When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. === WHAT IS REPORTED ===================================================== Actually, report retrieval function returns TWO report structures: * one for most suspicious point found so far (one with highest change in the directional derivative), so called "strongest" report * another one for most detailed line search (more function evaluations = easier to understand what's going on) which triggered test #1 criteria, so called "longest" report In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. You can plot function values stored in stp[] and g[] arrays and study behavior of your function by your own eyes, just to be sure that test correctly reported C1 violation. 
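The retrieval pattern mirrors test #0 (again a sketch shown for MinLBFGS
with an assumed optimizer variable named state; check the exact function
name for your solver):

    alglib::optguardnonc1test1report strrep, lngrep;
    alglib::minlbfgsoptguardnonc1test1results(state, strrep, lngrep);
    if( strrep.positive )
        printf("variable %d has a suspicious gradient component near steps %d..%d\n",
               (int)strrep.vidx, (int)strrep.stpidxa, (int)strrep.stpidxb);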
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ _optguardnonc1test1report_owner::_optguardnonc1test1report_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test1report_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::optguardnonc1test1report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc1test1report), &_state); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test1report)); alglib_impl::_optguardnonc1test1report_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _optguardnonc1test1report_owner::_optguardnonc1test1report_owner(const _optguardnonc1test1report_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test1report_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc1test1report copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::optguardnonc1test1report*)alglib_impl::ae_malloc(sizeof(alglib_impl::optguardnonc1test1report), &_state); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test1report)); alglib_impl::_optguardnonc1test1report_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _optguardnonc1test1report_owner& _optguardnonc1test1report_owner::operator=(const _optguardnonc1test1report_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: optguardnonc1test1report assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: optguardnonc1test1report assignment constructor failure (source is not initialized)", &_state); alglib_impl::_optguardnonc1test1report_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::optguardnonc1test1report)); alglib_impl::_optguardnonc1test1report_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _optguardnonc1test1report_owner::~_optguardnonc1test1report_owner() { if( p_struct!=NULL ) { alglib_impl::_optguardnonc1test1report_destroy(p_struct); ae_free(p_struct); } } alglib_impl::optguardnonc1test1report* _optguardnonc1test1report_owner::c_ptr() { return p_struct; } alglib_impl::optguardnonc1test1report* _optguardnonc1test1report_owner::c_ptr() const { return const_cast(p_struct); } optguardnonc1test1report::optguardnonc1test1report() : _optguardnonc1test1report_owner() 
,positive(p_struct->positive),fidx(p_struct->fidx),vidx(p_struct->vidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),g(&p_struct->g),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc1test1report::optguardnonc1test1report(const optguardnonc1test1report &rhs):_optguardnonc1test1report_owner(rhs) ,positive(p_struct->positive),fidx(p_struct->fidx),vidx(p_struct->vidx),x0(&p_struct->x0),d(&p_struct->d),n(p_struct->n),stp(&p_struct->stp),g(&p_struct->g),cnt(p_struct->cnt),stpidxa(p_struct->stpidxa),stpidxb(p_struct->stpidxb) { } optguardnonc1test1report& optguardnonc1test1report::operator=(const optguardnonc1test1report &rhs) { if( this==&rhs ) return *this; _optguardnonc1test1report_owner::operator=(rhs); return *this; } optguardnonc1test1report::~optguardnonc1test1report() { } #endif #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* *************************************************************************/ _minlbfgsstate_owner::_minlbfgsstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlbfgsstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlbfgsstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlbfgsstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsstate)); alglib_impl::_minlbfgsstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlbfgsstate_owner::_minlbfgsstate_owner(const _minlbfgsstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlbfgsstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlbfgsstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlbfgsstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlbfgsstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsstate)); alglib_impl::_minlbfgsstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlbfgsstate_owner& _minlbfgsstate_owner::operator=(const _minlbfgsstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) 
_ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlbfgsstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlbfgsstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minlbfgsstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsstate)); alglib_impl::_minlbfgsstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlbfgsstate_owner::~_minlbfgsstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minlbfgsstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlbfgsstate* _minlbfgsstate_owner::c_ptr() { return p_struct; } alglib_impl::minlbfgsstate* _minlbfgsstate_owner::c_ptr() const { return const_cast(p_struct); } minlbfgsstate::minlbfgsstate() : _minlbfgsstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minlbfgsstate::minlbfgsstate(const minlbfgsstate &rhs):_minlbfgsstate_owner(rhs) ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minlbfgsstate& minlbfgsstate::operator=(const minlbfgsstate &rhs) { if( this==&rhs ) return *this; _minlbfgsstate_owner::operator=(rhs); return *this; } minlbfgsstate::~minlbfgsstate() { } /************************************************************************* This structure stores optimization report: * IterationsCount total number of inner iterations * NFEV number of gradient evaluations * TerminationType termination type (see below) TERMINATION CODES TerminationType field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 terminated by user who called minlbfgsrequesttermination(). X contains point which was "current accepted" when termination request was submitted. Other fields of this structure are not documented and should not be used! 
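For orientation, a minimal end-to-end sketch which produces and inspects
this report (an illustration only; my_grad is a hypothetical callback
following the usual MinLBFGSOptimize() convention, and the tolerances are
example values):

    void my_grad(const alglib::real_1d_array &x, double &func, alglib::real_1d_array &grad, void *ptr)
    {
        // f(x) = (x0-1)^2 + x1^2, with its analytic gradient
        func = (x[0]-1)*(x[0]-1) + x[1]*x[1];
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*x[1];
    }

    ...

    alglib::real_1d_array x = "[0,0]";
    alglib::minlbfgsstate state;
    alglib::minlbfgsreport rep;
    alglib::minlbfgscreate(2, x, state);                 // M=2 corrections, N inferred from x
    alglib::minlbfgssetcond(state, 0, 0, 0.0000000001, 0);
    alglib::minlbfgsoptimize(state, my_grad);
    alglib::minlbfgsresults(state, x, rep);
    if( rep.terminationtype>0 )
        printf("done in %d iterations\n", (int)rep.iterationscount);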
*************************************************************************/ _minlbfgsreport_owner::_minlbfgsreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlbfgsreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlbfgsreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlbfgsreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsreport)); alglib_impl::_minlbfgsreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlbfgsreport_owner::_minlbfgsreport_owner(const _minlbfgsreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlbfgsreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlbfgsreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlbfgsreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlbfgsreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsreport)); alglib_impl::_minlbfgsreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlbfgsreport_owner& _minlbfgsreport_owner::operator=(const _minlbfgsreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlbfgsreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlbfgsreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minlbfgsreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlbfgsreport)); alglib_impl::_minlbfgsreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlbfgsreport_owner::~_minlbfgsreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minlbfgsreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlbfgsreport* _minlbfgsreport_owner::c_ptr() { return p_struct; } alglib_impl::minlbfgsreport* _minlbfgsreport_owner::c_ptr() const { return const_cast(p_struct); } minlbfgsreport::minlbfgsreport() : _minlbfgsreport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype) { } minlbfgsreport::minlbfgsreport(const minlbfgsreport &rhs):_minlbfgsreport_owner(rhs) ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype) { } minlbfgsreport& minlbfgsreport::operator=(const minlbfgsreport &rhs) { if( this==&rhs ) return *this; 
_minlbfgsreport_owner::operator=(rhs); return *this; } minlbfgsreport::~minlbfgsreport() { } /************************************************************************* LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using a quasi- Newton method (LBFGS scheme) which is optimized to use a minimum amount of memory. The subroutine generates the approximation of an inverse Hessian matrix by using information about the last M steps of the algorithm (instead of N). It lessens a required amount of memory from a value of order N^2 to a value of order 2*N*M. REQUIREMENTS: Algorithm will request following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinLBFGSCreate() call 2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() and other functions 3. User calls MinLBFGSOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinLBFGSResults() to get solution 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLBFGSRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension. N>0 M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7. The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - initial solution approximation, array[0..N-1]. OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLBFGSSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLBFGSSetStpMax() function to bound algorithm's steps. However, L-BFGS rarely needs such a tuning. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgscreate(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using a quasi- Newton method (LBFGS scheme) which is optimized to use a minimum amount of memory. The subroutine generates the approximation of an inverse Hessian matrix by using information about the last M steps of the algorithm (instead of N). It lessens a required amount of memory from a value of order N^2 to a value of order 2*N*M. 
REQUIREMENTS: Algorithm will request following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinLBFGSCreate() call 2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() and other functions 3. User calls MinLBFGSOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinLBFGSResults() to get solution 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLBFGSRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension. N>0 M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7. The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - initial solution approximation, array[0..N-1]. OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLBFGSSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLBFGSSetStpMax() function to bound algorithm's steps. However, L-BFGS rarely needs such a tuning. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgscreate(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* The subroutine is finite difference variant of MinLBFGSCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinLBFGSCreate() in order to get more information about creation of LBFGS optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7. The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinLBFGSSetScale() call. 3. we recommend you to use moderate values of differentiation step. 
Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. LBFGS needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. -- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgscreatef(n, m, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* The subroutine is finite difference variant of MinLBFGSCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinLBFGSCreate() in order to get more information about creation of LBFGS optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7. The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinLBFGSSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. LBFGS needs exact gradient values. 
Imprecise gradient may slow down convergence, especially on highly
nonlinear problems. Thus we recommend to use this function for fast
prototyping on small-dimensional problems only, and to implement the
analytical gradient as soon as possible.

  -- ALGLIB --
     Copyright 16.05.2011 by Bochkanov Sergey
*************************************************************************/
#if !defined(AE_NO_EXCEPTIONS)
void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    ae_int_t n;

    n = x.length();
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minlbfgscreatef(n, m, const_cast<alglib_impl::ae_vector*>(x.c_ptr()), diffstep, const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}
#endif

/*************************************************************************
This function sets stopping conditions for the L-BFGS optimization
algorithm.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    EpsG    -   >=0
                The subroutine finishes its work if the condition
                |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
                * v - scaled gradient vector, v[i]=g[i]*s[i]
                * g - gradient
                * s - scaling coefficients set by MinLBFGSSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                is satisfied.
    EpsX    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinLBFGSSetScale()
    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                iterations is unlimited.

Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).

  -- ALGLIB --
     Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;

    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
        return;
#endif
    }
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minlbfgssetcond(const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}

/*************************************************************************
This function turns on/off reporting.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    NeedXRep-   whether iteration reports are needed or not

If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinLBFGSOptimize().
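A short illustrative sketch of how the reporting callback is typically
wired up (my_rep and my_grad are hypothetical names; the rep() signature
follows the MinLBFGSOptimize() convention):

    void my_rep(const alglib::real_1d_array &x, double func, void *ptr)
    {
        // called once per iteration when XRep is turned on
        printf("current f = %.6f\n", func);
    }

    ...

    alglib::minlbfgssetxrep(state, true);
    alglib::minlbfgsoptimize(state, my_grad, my_rep, NULL);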
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for LBFGS optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the LBFGS too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinLBFGSSetPrec...() functions. There is special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. 
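For example (an illustrative sketch with made-up magnitudes): if the first
variable lives around 1 and the second around 10000, scales of comparable
magnitude can be supplied as follows:

    alglib::real_1d_array s = "[1,10000]";
    alglib::minlbfgssetscale(state, s);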
-- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: default preconditioner (simple scaling, same for all elements of X) is used. INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecdefault(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetprecdefault(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: Cholesky factorization of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state P - triangular preconditioner, Cholesky factorization of the approximate Hessian. array[0..N-1,0..N-1], (if larger, only leading N elements are used). IsUpper - whether upper or lower triangle of P is given (other triangle is not referenced) After call to this function preconditioner is changed to P (P is copied into the internal buffer). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: P should be nonsingular. Exception will be thrown otherwise. 
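For example, for N=2 an upper triangular Cholesky factor may be supplied as follows (the numbers are purely illustrative; P must be nonsingular):

    alglib::real_2d_array p = "[[2.0,0.3],[0.0,1.5]]";
    alglib::minlbfgssetpreccholesky(state, p, true);    // true: upper triangle of P is given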
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetpreccholesky(const_cast(state.c_ptr()), const_cast(p.c_ptr()), isupper, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: D[i] should be positive. Exception will be thrown otherwise. NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetprecdiag(const_cast(state.c_ptr()), const_cast(d.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinLBFGSSetScale() call (before or after MinLBFGSSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. 
INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecscale(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetprecscale(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ bool minlbfgsiteration(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minlbfgsiteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minlbfgsoptimize(minlbfgsstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minlbfgsoptimize()' (func is NULL)", &_alglib_env_state); while( alglib_impl::minlbfgsiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minlbfgsoptimize(minlbfgsstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if 
!defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minlbfgsoptimize()' (grad is NULL)", &_alglib_env_state); while( alglib_impl::minlbfgsiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minlbfgsoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minlbfgssetscale(). 
=== EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardgradient(const minlbfgsstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may went unnoticed (however, in most cases they can be spotted with restart from different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of the coding errors in the target function/constraints (or their gradients). 
Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to detect errors. Upon discovering suspicious pair of points it raises appropriate flag (and allows you to continue optimization). When optimization is done, you can study OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/
void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const ae_int_t level, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardsmoothness(const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }
/************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with restart from different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations.
=== EXPLANATION ========================================================== One major source of headache during optimization is the possibility of the coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to detect errors. Upon discovering suspicious pair of points it raises appropriate flag (and allows you to continue optimization). When optimization is done, you can study OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/
#if !defined(AE_NO_EXCEPTIONS) void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t level; level = 1; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardsmoothness(const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif
/************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over.
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minlbfgsoptguardgradient() for gradient verification * minlbfgsoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minlbfgsoptguardnonc1test0results() * minlbfgsoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. 
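A typical activate-then-inspect sequence may look as follows (the test step 1.0E-3 is an illustrative choice and my_grad denotes a user-supplied gradient callback):

    alglib::real_1d_array xsol;
    alglib::minlbfgsreport lbfgsrep;
    alglib::optguardreport ogrep;
    alglib::minlbfgsoptguardgradient(state, 1.0E-3);    // compare analytic gradient with numerical one
    alglib::minlbfgsoptguardsmoothness(state);          // watch for C0/C1 continuity violations
    alglib::minlbfgsoptimize(state, my_grad, NULL, NULL);
    alglib::minlbfgsresults(state, xsol, lbfgsrep);
    alglib::minlbfgsoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected || ogrep.nonc0suspected || ogrep.nonc1suspected )
    {
        // something suspicious was flagged; see the fields described above
    }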
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardresults(const minlbfgsstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardnonc1test0results(const minlbfgsstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardnonc1test0results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/
void minlbfgsoptguardnonc1test1results(const minlbfgsstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsoptguardnonc1test1results(const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), const_cast<alglib_impl::optguardnonc1test1report*>(strrep.c_ptr()), const_cast<alglib_impl::optguardnonc1test1report*>(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }
/************************************************************************* L-BFGS algorithm results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report: * Rep.TerminationType completion code: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -2 rounding errors prevent further improvement. X contains best point found. * -1 incorrect parameters were specified * 1 relative function improvement is no more than EpsF. * 2 relative step is no more than EpsX. * 4 gradient norm is no more than EpsG * 5 MaxIts steps were taken * 7 stopping conditions are too stringent, further improvement is impossible * 8 terminated by user who called minlbfgsrequesttermination(). X contains point which was "current accepted" when termination request was submitted. * Rep.IterationsCount contains iterations count * NFEV contains number of function calculations -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/
void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsresults(const_cast<alglib_impl::minlbfgsstate*>(state.c_ptr()), const_cast<alglib_impl::ae_vector*>(x.c_ptr()), const_cast<alglib_impl::minlbfgsreport*>(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }
/************************************************************************* L-BFGS algorithm results Buffered implementation of MinLBFGSResults which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored.
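A typical reuse pattern may look as follows (my_grad, nproblems and startingpoints are hypothetical user-side entities; see minlbfgsrestartfrom() below):

    alglib::real_1d_array xsol;
    alglib::minlbfgsreport rep;
    for(int k=0; k<nproblems; k++)
    {
        alglib::minlbfgsrestartfrom(state, startingpoints[k]);
        alglib::minlbfgsoptimize(state, my_grad, NULL, NULL);
        alglib::minlbfgsresultsbuf(state, xsol, rep);    // xsol/rep buffers are reused between calls
    }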
-- ALGLIB -- Copyright 20.08.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts LBFGS algorithm from new point. All optimization parameters are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used to store algorithm state X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
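One possible arrangement is to keep the optimizer visible to the callback and to poll an external flag there (g_state, stop_requested and evaluate_fg() are hypothetical user-side entities):

    alglib::minlbfgsstate g_state;           // optimizer, visible to the callback
    volatile bool stop_requested = false;    // set elsewhere, e.g. from a GUI thread

    void my_grad(const alglib::real_1d_array &x, double &f, alglib::real_1d_array &g, void *ptr)
    {
        evaluate_fg(x, f, g);
        if( stop_requested )
            alglib::minlbfgsrequesttermination(g_state);    // optimizer stops at the current accepted point, code 8
    }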
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minlbfgsrequesttermination(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgsrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores nonlinear optimizer state. You should use functions provided by MinBLEIC subpackage to work with this object *************************************************************************/ _minbleicstate_owner::_minbleicstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbleicstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minbleicstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbleicstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbleicstate)); alglib_impl::_minbleicstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minbleicstate_owner::_minbleicstate_owner(const _minbleicstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbleicstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbleicstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minbleicstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbleicstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbleicstate)); alglib_impl::_minbleicstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minbleicstate_owner& _minbleicstate_owner::operator=(const _minbleicstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minbleicstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, 
"ALGLIB: minbleicstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minbleicstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minbleicstate)); alglib_impl::_minbleicstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minbleicstate_owner::~_minbleicstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minbleicstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minbleicstate* _minbleicstate_owner::c_ptr() { return p_struct; } alglib_impl::minbleicstate* _minbleicstate_owner::c_ptr() const { return const_cast(p_struct); } minbleicstate::minbleicstate() : _minbleicstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minbleicstate::minbleicstate(const minbleicstate &rhs):_minbleicstate_owner(rhs) ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minbleicstate& minbleicstate::operator=(const minbleicstate &rhs) { if( this==&rhs ) return *this; _minbleicstate_owner::operator=(rhs); return *this; } minbleicstate::~minbleicstate() { } /************************************************************************* This structure stores optimization report: * IterationsCount number of iterations * NFEV number of gradient evaluations * TerminationType termination type (see below) TERMINATION CODES TerminationType field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. -3 inconsistent constraints. Feasible point is either nonexistent or too hard to find. Try to restart optimizer with better initial approximation 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 terminated by user who called minbleicrequesttermination(). X contains point which was "current accepted" when termination request was submitted. 
ADDITIONAL FIELDS There are additional fields which can be used for debugging: * DebugEqErr error in the equality constraints (2-norm) * DebugFS f, calculated at projection of initial point to the feasible set * DebugFF f, calculated at the final point * DebugDX |X_start-X_final| *************************************************************************/ _minbleicreport_owner::_minbleicreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbleicreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minbleicreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbleicreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbleicreport)); alglib_impl::_minbleicreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minbleicreport_owner::_minbleicreport_owner(const _minbleicreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbleicreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbleicreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minbleicreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbleicreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbleicreport)); alglib_impl::_minbleicreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minbleicreport_owner& _minbleicreport_owner::operator=(const _minbleicreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minbleicreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbleicreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minbleicreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minbleicreport)); alglib_impl::_minbleicreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minbleicreport_owner::~_minbleicreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minbleicreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minbleicreport* _minbleicreport_owner::c_ptr() { return p_struct; } alglib_impl::minbleicreport* _minbleicreport_owner::c_ptr() const { return const_cast(p_struct); } minbleicreport::minbleicreport() : _minbleicreport_owner() 
,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),varidx(p_struct->varidx),terminationtype(p_struct->terminationtype),debugeqerr(p_struct->debugeqerr),debugfs(p_struct->debugfs),debugff(p_struct->debugff),debugdx(p_struct->debugdx),debugfeasqpits(p_struct->debugfeasqpits),debugfeasgpaits(p_struct->debugfeasgpaits),inneriterationscount(p_struct->inneriterationscount),outeriterationscount(p_struct->outeriterationscount) { } minbleicreport::minbleicreport(const minbleicreport &rhs):_minbleicreport_owner(rhs) ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),varidx(p_struct->varidx),terminationtype(p_struct->terminationtype),debugeqerr(p_struct->debugeqerr),debugfs(p_struct->debugfs),debugff(p_struct->debugff),debugdx(p_struct->debugdx),debugfeasqpits(p_struct->debugfeasqpits),debugfeasgpaits(p_struct->debugfeasgpaits),inneriterationscount(p_struct->inneriterationscount),outeriterationscount(p_struct->outeriterationscount) { } minbleicreport& minbleicreport::operator=(const minbleicreport &rhs) { if( this==&rhs ) return *this; _minbleicreport_owner::operator=(rhs); return *this; } minbleicreport::~minbleicreport() { } /************************************************************************* BOUND CONSTRAINED OPTIMIZATION WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization if far more complex than the unconstrained one. Here we give very brief outline of the BLEIC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBLEICCreate() call 2. USer adds boundary and/or linear constraints by calling MinBLEICSetBC() and MinBLEICSetLC() functions. 3. User sets stopping conditions with MinBLEICSetCond(). 4. User calls MinBLEICOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBLEICResults() to get solution 6. Optionally user may call MinBLEICRestartFrom() to solve another problem with same N but another starting point. MinBLEICRestartFrom() allows to reuse already initialized structure. NOTE: if you have box-only constraints (no general linear constraints), then MinBC optimizer can be better option. It uses special, faster constraint activation method, which performs better on problems with multiple constraints active at the solution. On small-scale problems performance of MinBC is similar to that of MinBLEIC, but on large-scale ones (hundreds and thousands of active constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. 
OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleiccreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BOUND CONSTRAINED OPTIMIZATION WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization if far more complex than the unconstrained one. Here we give very brief outline of the BLEIC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBLEICCreate() call 2. USer adds boundary and/or linear constraints by calling MinBLEICSetBC() and MinBLEICSetLC() functions. 3. User sets stopping conditions with MinBLEICSetCond(). 4. User calls MinBLEICOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBLEICResults() to get solution 6. Optionally user may call MinBLEICRestartFrom() to solve another problem with same N but another starting point. MinBLEICRestartFrom() allows to reuse already initialized structure. NOTE: if you have box-only constraints (no general linear constraints), then MinBC optimizer can be better option. It uses special, faster constraint activation method, which performs better on problems with multiple constraints active at the solution. On small-scale problems performance of MinBC is similar to that of MinBLEIC, but on large-scale ones (hundreds and thousands of active constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. 
OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbleiccreate(const real_1d_array &x, minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleiccreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* The subroutine is finite difference variant of MinBLEICCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBLEICCreate() in order to get more information about creation of BLEIC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBLEICSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. CG needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
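A minimal finite-difference setup may look as follows (the starting point, bounds, differentiation step and tolerances are purely illustrative; MinBLEICOptimize() and MinBLEICResults() are then used as usual):

    alglib::real_1d_array x0   = "[0,0]";
    alglib::real_1d_array bndl = "[-1,-1]";
    alglib::real_1d_array bndu = "[+1,+1]";
    alglib::minbleicstate state;
    alglib::minbleiccreatef(2, x0, 1.0e-6, state);    // numerical differentiation with DiffStep=1.0E-6
    alglib::minbleicsetbc(state, bndl, bndu);         // box constraints (see minbleicsetbc() below)
    alglib::minbleicsetcond(state, 0, 0, 1.0e-6, 0);  // stop when scaled step becomes smaller than 1.0E-6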
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleiccreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* The subroutine is finite difference variant of MinBLEICCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBLEICCreate() in order to get more information about creation of BLEIC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBLEICSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. CG needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbleiccreatef(const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleiccreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets boundary constraints for BLEIC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBLEICRestartFrom(). NOTE: if you have box-only constraints (no general linear constraints), then MinBC optimizer can be better option. It uses special, faster constraint activation method, which performs better on problems with multiple constraints active at the solution. On small-scale problems performance of MinBC is similar to that of MinBLEIC, but on large-scale ones (hundreds and thousands of active constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints, even when numerical differentiation is used (algorithm adjusts nodes according to boundary constraints) -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints for BLEIC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBLEICRestartFrom(). INPUT PARAMETERS: State - structure previously allocated with MinBLEICCreate call. C - linear constraints, array[K,N+1]. 
Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about Epsilon in magnitude) due to rounding errors * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints for BLEIC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBLEICRestartFrom(). INPUT PARAMETERS: State - structure previously allocated with MinBLEICCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about Epsilon in magnitude) due to rounding errors * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. 
If you want constraints to be satisfied exactly, try to reformulate your problem in such a manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points).

-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (c.rows()!=ct.length()) ) _ALGLIB_CPP_EXCEPTION("Error while calling 'minbleicsetlc': looks like one of arguments has wrong size"); k = c.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetlc(const_cast<alglib_impl::minbleicstate*>(state.c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif

/************************************************************************* This function sets stopping conditions for the optimizer.

INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidean norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinBLEICSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinBLEICSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited.

Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection.

NOTE: when SetCond() is called with non-zero MaxIts, the BLEIC solver may perform slightly more than MaxIts iterations, i.e. MaxIts sets a non-strict limit on the iteration count.

-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetcond(const_cast<alglib_impl::minbleicstate*>(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }

/************************************************************************* This function sets scaling coefficients for BLEIC optimizer.

ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances).
Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the BLEIC too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinBLEICSetPrec...() functions. There is a special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minbleicsetscale(const minbleicstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecdefault(const minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetprecdefault(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE 1: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 
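For illustration, a typical call looks as follows (a sketch: the state object is assumed to have been created earlier and the diagonal values are arbitrary):

    real_1d_array d = "[2.0, 50.0]";   // rough estimates of the Hessian diagonal, strictly positive
    minbleicsetprecdiag(state, d);     // pass the diagonal itself, not its inverse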
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetprecdiag(const_cast(state.c_ptr()), const_cast(d.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinBLEICSetScale() call (before or after MinBLEICSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecscale(const minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetprecscale(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinBLEICOptimize(). 
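A sketch of the reporting pattern is shown below (my_grad denotes an assumed user-written gradient callback, and printf assumes <stdio.h>):

    void my_rep(const real_1d_array &x, double func, void *ptr)
    {
        // invoked once per accepted iterate when reporting is enabled
        printf("current f = %.8f\n", func);
    }

    minbleicsetxrep(state, true);
    minbleicoptimize(state, my_grad, my_rep, NULL);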
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetxrep(const minbleicstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets maximum step length IMPORTANT: this feature is hard to combine with preconditioning. You can't set upper limit on step length, when you solve optimization problem with linear (non-boundary) constraints AND preconditioner turned on. When non-boundary constraints are present, you have to either a) use preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! In this case algorithm will terminate with appropriate error code. INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetstpmax(const minbleicstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. 
See below for functions which provide better documented API *************************************************************************/ bool minbleiciteration(const minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minbleiciteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minbleicoptimize(minbleicstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minbleicoptimize()' (func is NULL)", &_alglib_env_state); while( alglib_impl::minbleiciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minbleicoptimize(minbleicstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minbleicoptimize()' (grad is NULL)", &_alglib_env_state); while( alglib_impl::minbleiciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied 
analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minbleicoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minbleicsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardgradient(const minbleicstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. 
It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over.

Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems.

NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point).

INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations.

=== EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching the solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result.

-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardsmoothness(const minbleicstate &state, const ae_int_t level, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardsmoothness(const_cast<alglib_impl::minbleicstate*>(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }

/************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e.
ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1)

Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over.

Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems.

NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point).

INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations.

=== EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching the solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result.

-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbleicoptguardsmoothness(const minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t level; level = 1; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardsmoothness(const_cast<alglib_impl::minbleicstate*>(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif

/************************************************************************* Results of the OptGuard integrity check; this function should be called after the optimization session is over.
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minbleicoptguardgradient() for gradient verification * minbleicoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minbleicoptguardnonc1test0results() * minbleicoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. 
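Typical usage is sketched below (my_grad denotes an assumed user-supplied analytic gradient callback; printf assumes <stdio.h>):

    minbleicoptguardgradient(state, 0.001);   // verify analytic gradient, TestStep=0.001
    minbleicoptguardsmoothness(state);        // enable C0/C1 monitoring at level 1
    minbleicoptimize(state, my_grad);
    minbleicresults(state, x, rep);

    optguardreport ogrep;
    minbleicoptguardresults(state, ogrep);
    printf("bad gradient suspected: %s\n", ogrep.badgradsuspected ? "yes" : "no");
    printf("non-C0 target suspected: %s\n", ogrep.nonc0suspected ? "yes" : "no");
    printf("non-C1 target suspected: %s\n", ogrep.nonc1suspected ? "yes" : "no");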
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardresults(const minbleicstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardnonc1test0results(const minbleicstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardnonc1test0results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardnonc1test1results(const minbleicstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicoptguardnonc1test1results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BLEIC results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report. You should check Rep.TerminationType in order to distinguish successful termination from unsuccessful one: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 inconsistent constraints. Feasible point is either nonexistent or too hard to find. Try to restart optimizer with better initial approximation * 1 relative function improvement is no more than EpsF. * 2 scaled step is no more than EpsX. * 4 scaled gradient norm is no more than EpsG. * 5 MaxIts steps was taken * 8 terminated by user who called minbleicrequesttermination(). X contains point which was "current accepted" when termination request was submitted. More information about fields of this structure can be found in the comments on MinBLEICReport datatype. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BLEIC results Buffered implementation of MinBLEICResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. 
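A short retrieval sketch (shown with minbleicresults(); the buffered variant is called in exactly the same way but reuses the array passed in; printf assumes <stdio.h>):

    real_1d_array xsol;
    minbleicreport rep;
    minbleicresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("solution: %s\n", xsol.tostring(6).c_str());
    else
        printf("solver failed, terminationtype=%d\n", (int)rep.terminationtype);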
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure previously allocated with MinBLEICCreate call. X - new starting point. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
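A sketch of the usual pattern: pass the state object through the callback's ptr argument and request termination from the rep() callback (the callback names and the threshold are assumptions made for this example):

    void my_rep(const real_1d_array &x, double func, void *ptr)
    {
        minbleicstate *s = reinterpret_cast<minbleicstate*>(ptr);
        if( func<1.0e-3 )                     // problem-specific threshold
            minbleicrequesttermination(*s);   // solver stops and returns code 8
    }

    minbleicsetxrep(state, true);             // make sure rep() is actually called
    minbleicoptimize(state, my_grad, my_rep, &state);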
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minbleicrequesttermination(const minbleicstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores nonlinear optimizer state. You should use functions provided by MinQP subpackage to work with this object *************************************************************************/ _minqpstate_owner::_minqpstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minqpstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minqpstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minqpstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minqpstate)); alglib_impl::_minqpstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minqpstate_owner::_minqpstate_owner(const _minqpstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minqpstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minqpstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minqpstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minqpstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minqpstate)); alglib_impl::_minqpstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minqpstate_owner& _minqpstate_owner::operator=(const _minqpstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minqpstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minqpstate assignment constructor failure (source is not 
initialized)", &_state); alglib_impl::_minqpstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minqpstate)); alglib_impl::_minqpstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minqpstate_owner::~_minqpstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minqpstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minqpstate* _minqpstate_owner::c_ptr() { return p_struct; } alglib_impl::minqpstate* _minqpstate_owner::c_ptr() const { return const_cast(p_struct); } minqpstate::minqpstate() : _minqpstate_owner() { } minqpstate::minqpstate(const minqpstate &rhs):_minqpstate_owner(rhs) { } minqpstate& minqpstate::operator=(const minqpstate &rhs) { if( this==&rhs ) return *this; _minqpstate_owner::operator=(rhs); return *this; } minqpstate::~minqpstate() { } /************************************************************************* This structure stores optimization report: * InnerIterationsCount number of inner iterations * OuterIterationsCount number of outer iterations * NCholesky number of Cholesky decomposition * NMV number of matrix-vector products (only products calculated as part of iterative process are counted) * TerminationType completion code (see below) * LagBC Lagrange multipliers for box constraints, array[N], not filled by QP-BLEIC solver * LagLC Lagrange multipliers for linear constraints, array[MSparse+MDense], ignored by QP-BLEIC solver === COMPLETION CODES ===================================================== Completion codes: * -9 failure of the automatic scale evaluation: one of the diagonal elements of the quadratic term is non-positive. Specify variable scales manually! * -5 inappropriate solver was used: * QuickQP solver for problem with general linear constraints (dense/sparse) * -4 BLEIC-QP or QuickQP solver found unconstrained direction of negative curvature (function is unbounded from below even under constraints), no meaningful minimum can be found. * -3 inconsistent constraints (or, maybe, feasible point is too hard to find). If you are sure that constraints are feasible, try to restart optimizer with better initial approximation. * -1 solver error * 1..4 successful completion * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. === LAGRANGE MULTIPLIERS ================================================= Some optimizers report values of Lagrange multipliers on successful completion (positive completion code): * DENSE-IPM-QP and SPARSE-IPM-QP return very precise Lagrange multipliers as determined during solution process. * DENSE-AUL-QP returns approximate Lagrange multipliers (which are very close to "true" Lagrange multipliers except for overconstrained or degenerate problems) Two arrays of multipliers are returned: * LagBC is array[N] which is loaded with multipliers from box constraints; LagBC[i]>0 means that I-th constraint is at the upper bound, LagBC[I]<0 means that I-th constraint is at the lower bound, LagBC[I]=0 means that I-th box constraint is inactive. * LagLC is array[MSparse+MDense] which is loaded with multipliers from general linear constraints (former MSparse elements corresponds to sparse part of the constraint matrix, latter MDense are for the dense constraints, as was specified by user). LagLC[i]>0 means that I-th constraint at the upper bound, LagLC[i]<0 means that I-th constraint is at the lower bound, LagLC[i]=0 means that I-th linear constraint is inactive. 
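For example, after a successful solve the multipliers can be inspected as follows (a sketch; it assumes x and Rep were retrieved with minqpresults() and that a solver which reports multipliers, e.g. an IPM one, was used; printf assumes <stdio.h>):

    minqpresults(state, x, rep);
    if( rep.terminationtype>0 )
        for(int i=0; i<x.length(); i++)
        {
            if( rep.lagbc[i]>0 ) printf("x[%d] is at its upper bound\n", i);
            if( rep.lagbc[i]<0 ) printf("x[%d] is at its lower bound\n", i);
        }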
On failure (or when optimizer does not support Lagrange multipliers) these arrays are zero-filled. NOTE: methods from IPM family may also return meaningful Lagrange multipliers on completion with codes -3 (infeasibility detected) and -4 (unboundedness detected). It is possible that seeming infeasibility/unboundedness of the problem is due to rounding errors In this case last values of Lagrange multipliers are returned. *************************************************************************/ _minqpreport_owner::_minqpreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minqpreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minqpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minqpreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minqpreport)); alglib_impl::_minqpreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minqpreport_owner::_minqpreport_owner(const _minqpreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minqpreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minqpreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minqpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minqpreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minqpreport)); alglib_impl::_minqpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minqpreport_owner& _minqpreport_owner::operator=(const _minqpreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minqpreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minqpreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minqpreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minqpreport)); alglib_impl::_minqpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minqpreport_owner::~_minqpreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minqpreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minqpreport* _minqpreport_owner::c_ptr() { return p_struct; } alglib_impl::minqpreport* _minqpreport_owner::c_ptr() const { return const_cast(p_struct); } minqpreport::minqpreport() : _minqpreport_owner() 
,inneriterationscount(p_struct->inneriterationscount),outeriterationscount(p_struct->outeriterationscount),nmv(p_struct->nmv),ncholesky(p_struct->ncholesky),terminationtype(p_struct->terminationtype),lagbc(&p_struct->lagbc),laglc(&p_struct->laglc) { } minqpreport::minqpreport(const minqpreport &rhs):_minqpreport_owner(rhs) ,inneriterationscount(p_struct->inneriterationscount),outeriterationscount(p_struct->outeriterationscount),nmv(p_struct->nmv),ncholesky(p_struct->ncholesky),terminationtype(p_struct->terminationtype),lagbc(&p_struct->lagbc),laglc(&p_struct->laglc) { } minqpreport& minqpreport::operator=(const minqpreport &rhs) { if( this==&rhs ) return *this; _minqpreport_owner::operator=(rhs); return *this; } minqpreport::~minqpreport() { } /************************************************************************* CONSTRAINED QUADRATIC PROGRAMMING The subroutine creates QP optimizer. After initial creation, it contains default optimization problem with zero quadratic and linear terms and no constraints. In order to actually solve something you should: * set cost vector with minqpsetlinearterm() * set variable bounds with minqpsetbc() or minqpsetbcall() * specify constraint matrix with one of the following functions: * modern API: * minqpsetlc2() for sparse two-sided constraints AL <= A*x <= AU * minqpsetlc2dense() for dense two-sided constraints AL <= A*x <= AU * minqpsetlc2mixed() for mixed two-sided constraints AL <= A*x <= AU * minqpaddlc2dense() to add one dense row to dense constraint submatrix * minqpaddlc2() to add one sparse row to sparse constraint submatrix * legacy API: * minqpsetlc() for dense one-sided equality/inequality constraints * minqpsetlcsparse() for sparse one-sided equality/inequality constraints * minqpsetlcmixed() for mixed dense/sparse one-sided equality/inequality constraints * choose appropriate QP solver and set it and its stopping criteria by means of minqpsetalgo??????() function * call minqpoptimize() to run the solver and minqpresults() to get the solution vector and additional information. Following solvers are recommended for convex and semidefinite problems: * QuickQP for dense problems with box-only constraints (or no constraints at all) * DENSE-IPM-QP for convex or semidefinite problems with medium (up to several thousands) variable count, dense/sparse quadratic term and any number (up to many thousands) of dense/sparse general linear constraints * SPARSE-IPM-QP for convex or semidefinite problems with large (many thousands) variable count, sparse quadratic term AND linear constraints. If your problem happens to be nonconvex, but either (a) is effectively convexified under constraints, or (b) has unique solution even with nonconvex target, then you can use: * QuickQP for dense nonconvex problems with box-only constraints * DENSE-AUL-QP for dense nonconvex problems which are effectively convexified under constraints with up to several thousands of variables and any (small or large) number of general linear constraints * QP-BLEIC for dense/sparse problems with small (up to several hundreds) number of general linear constraints and arbitrarily large variable count. 
INPUT PARAMETERS: N - problem size OUTPUT PARAMETERS: State - optimizer with zero quadratic/linear terms and no constraints -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpcreate(const ae_int_t n, minqpstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpcreate(n, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear term for QP solver. By default, linear term is zero. INPUT PARAMETERS: State - structure which stores algorithm state B - linear term, array[N]. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlinearterm(const_cast(state.c_ptr()), const_cast(b.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets dense quadratic term for QP solver. By default, quadratic term is zero. IMPORTANT: This solver minimizes following function: f(x) = 0.5*x'*A*x + b'*x. Note that quadratic term has 0.5 before it. So if you want to minimize f(x) = x^2 + x you should rewrite your problem as follows: f(x) = 0.5*(2*x^2) + x and your matrix A will be equal to [[2.0]], not to [[1.0]] INPUT PARAMETERS: State - structure which stores algorithm state A - matrix, array[N,N] IsUpper - (optional) storage type: * if True, symmetric matrix A is given by its upper triangle, and the lower triangle isn't used * if False, symmetric matrix A is given by its lower triangle, and the upper triangle isn't used * if not given, both lower and upper triangles must be filled. 
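For example, a sketch of the f(x) = x^2 + x problem mentioned above (State is a previously created optimizer; the symmetric, IsUpper-less overload is used):
>
>     real_2d_array a = "[[2.0]]";    // 0.5*x'*A*x = x^2 requires A=[[2.0]]
>     real_1d_array b = "[1.0]";
>     minqpsetquadraticterm(state, a);
>     minqpsetlinearterm(state, b);
>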
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetquadraticterm(const_cast(state.c_ptr()), const_cast(a.c_ptr()), isupper, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets dense quadratic term for QP solver. By default, quadratic term is zero. IMPORTANT: This solver minimizes following function: f(x) = 0.5*x'*A*x + b'*x. Note that quadratic term has 0.5 before it. So if you want to minimize f(x) = x^2 + x you should rewrite your problem as follows: f(x) = 0.5*(2*x^2) + x and your matrix A will be equal to [[2.0]], not to [[1.0]] INPUT PARAMETERS: State - structure which stores algorithm state A - matrix, array[N,N] IsUpper - (optional) storage type: * if True, symmetric matrix A is given by its upper triangle, and the lower triangle isn't used * if False, symmetric matrix A is given by its lower triangle, and the upper triangle isn't used * if not given, both lower and upper triangles must be filled. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; bool isupper; if( !alglib_impl::ae_is_symmetric(const_cast(a.c_ptr())) ) _ALGLIB_CPP_EXCEPTION("'a' parameter is not symmetric matrix"); isupper = false; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetquadraticterm(const_cast(state.c_ptr()), const_cast(a.c_ptr()), isupper, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets sparse quadratic term for QP solver. By default, quadratic term is zero. This function overrides previous calls to minqpsetquadraticterm() or minqpsetquadratictermsparse(). NOTE: dense solvers like DENSE-AUL-QP or DENSE-IPM-QP will convert this matrix to dense storage anyway. IMPORTANT: This solver minimizes following function: f(x) = 0.5*x'*A*x + b'*x. Note that quadratic term has 0.5 before it. 
So if you want to minimize f(x) = x^2 + x you should rewrite your problem as follows: f(x) = 0.5*(2*x^2) + x and your matrix A will be equal to [[2.0]], not to [[1.0]] INPUT PARAMETERS: State - structure which stores algorithm state A - matrix, array[N,N] IsUpper - (optional) storage type: * if True, symmetric matrix A is given by its upper triangle, and the lower triangle isn't used * if False, symmetric matrix A is given by its lower triangle, and the upper triangle isn't used * if not given, both lower and upper triangles must be filled. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetquadratictermsparse(const_cast(state.c_ptr()), const_cast(a.c_ptr()), isupper, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets starting point for QP solver. It is useful to have good initial approximation to the solution, because it will increase speed of convergence and identification of active constraints. NOTE: interior point solvers ignore initial point provided by user. INPUT PARAMETERS: State - structure which stores algorithm state X - starting point, array[N]. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetstartingpoint(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets origin for QP solver. By default, following QP program is solved: min(0.5*x'*A*x+b'*x) This function allows to solve different problem: min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) Specification of non-zero origin affects function being minimized, but not constraints. Box and linear constraints are still calculated without origin. INPUT PARAMETERS: State - structure which stores algorithm state XOrigin - origin, array[N]. 
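For instance, a short sketch (state created elsewhere); with this origin the solver minimizes 0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin), while box and linear constraints still apply to x itself:
>
>     real_1d_array xorigin = "[1.0,-2.0]";
>     minqpsetorigin(state, xorigin);
>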
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetorigin(const_cast(state.c_ptr()), const_cast(xorigin.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances) and as preconditioner. Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function If you do not know how to choose scales of your variables, you can: * read www.alglib.net/optimization/scaling.php article * use minqpsetscaleautodiag(), which calculates scale using diagonal of the quadratic term: S is set to 1/sqrt(diag(A)), which works well sometimes. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetscale(const minqpstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets automatic evaluation of variable scaling. IMPORTANT: this function works only for matrices with positive diagonal elements! Zero or negative elements will result in -9 error code being returned. Specify scale vector manually with minqpsetscale() in such cases. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances) and as preconditioner. The best way to set scaling is to manually specify variable scales. However, sometimes you just need quick-and-dirty solution - either when you perform fast prototyping, or when you know your problem well and you are 100% sure that this quick solution is robust enough in your case. One such solution is to evaluate scale of I-th variable as 1/Sqrt(A[i,i]), where A[i,i] is an I-th diagonal element of the quadratic term. Such approach works well sometimes, but you have to be careful here. 
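For example (illustrative numbers only): if diag(A)=(4.0, 10000.0), the automatically chosen scales are 1/sqrt(4.0)=0.5 and 1/sqrt(10000.0)=0.01:
>
>     minqpsetquadraticterm(state, a);    // quadratic term must have strictly positive diagonal
>     minqpsetscaleautodiag(state);       // S[i] is set to 1/sqrt(A[i,i])
>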
INPUT PARAMETERS: State - structure stores algorithm state

-- ALGLIB -- Copyright 26.12.2017 by Bochkanov Sergey
*************************************************************************/
void minqpsetscaleautodiag(const minqpstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetscaleautodiag(const_cast<alglib_impl::minqpstate*>(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }
/************************************************************************* This function tells solver to use BLEIC-based algorithm and sets stopping criteria for the algorithm. This algorithm is intended for large-scale problems, possibly nonconvex, with a small number of general linear constraints. A feasible initial point is essential for good performance.
IMPORTANT: when DENSE-IPM (or DENSE-AUL for nonconvex problems) solvers are applicable, their performance is much better than that of BLEIC-QP. We recommend using BLEIC only when other solvers cannot be used.
ALGORITHM FEATURES: * supports dense and sparse QP problems * supports box and general linear equality/inequality constraints * can solve all types of problems (convex, semidefinite, nonconvex) as long as they are bounded from below under constraints. Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". Of course, global minimum is found only for positive definite and semidefinite problems. As for indefinite ones - only local minimum is found.
ALGORITHM OUTLINE: * BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves quadratic programming problem as general linearly constrained optimization problem, which is solved by means of BLEIC solver (part of ALGLIB, active set method).
ALGORITHM LIMITATIONS: * This algorithm is inefficient on problems with hundreds and thousands of general inequality constraints and infeasible initial point. Initial feasibility detection stage may take too long on such constraint sets. Consider using DENSE-IPM or DENSE-AUL instead. * unlike QuickQP solver, this algorithm does not perform Newton steps and does not use Level 3 BLAS. Being a general-purpose active set method, it can activate constraints only one-by-one. Thus, its performance is lower than that of QuickQP. * its precision is also a bit inferior to that of QuickQP. BLEIC-QP performs only LBFGS steps (no Newton steps), which are good at detecting the neighborhood of the solution, but need many iterations to find a solution with more than 6 digits of precision.
INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidian norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinQPSetScale() EpsF - >=0 The subroutine finishes its work if exploratory steepest descent step on k+1-th iteration satisfies following condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} EpsX - >=0 The subroutine finishes its work if exploratory steepest descent step on k+1-th iteration satisfies following condition: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinQPSetScale() MaxIts - maximum number of iterations.
If MaxIts=0, the number of iterations is unlimited.
NOTE: this algorithm uses LBFGS iterations, which are relatively cheap, but improve function value only a bit. So you will need many iterations to converge - from 0.1*N to 10*N, depending on problem's condition number.
IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (presently it is small step length, but it may change in the future versions of ALGLIB).
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetalgobleic(const_cast<alglib_impl::minqpstate*>(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; }
/************************************************************************* This function tells QP solver to use DENSE-AUL algorithm and sets stopping criteria for the algorithm. This algorithm is intended for non-convex problems with moderate (up to several thousands) variable count and arbitrary number of constraints - problems which are either (a) effectively convexified under constraints or (b) have a unique solution even with a nonconvex target.
IMPORTANT: when DENSE-IPM solver is applicable, its performance is usually much better than that of DENSE-AUL. We recommend using DENSE-AUL only when other solvers cannot be used.
ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints * convergence is theoretically proved for positive-definite (convex) QP problems. Semidefinite and non-convex problems can be solved as long as they are bounded from below under constraints, although without theoretical guarantees.
ALGORITHM OUTLINE: * this algorithm is an augmented Lagrangian method with dense preconditioner (hence its name). * it performs several outer iterations in order to refine values of the Lagrange multipliers. Single outer iteration is a solution of some unconstrained optimization problem: first it performs dense Cholesky factorization of the Hessian in order to build preconditioner (adaptive regularization is applied to enforce positive definiteness), and then it uses L-BFGS optimizer to solve optimization problem. * typically you need about 5-10 outer iterations to converge to solution
ALGORITHM LIMITATIONS: * because dense Cholesky driver is used, this algorithm has O(N^2) memory requirements and O(OuterIterations*N^3) minimum running time. In practice this limits its applicability to problems with several thousands of variables. On the other hand, variable count is the most limiting factor, and the dependence on constraint count is much weaker. Assuming that constraint matrix is sparse, it may handle tens of thousands of general linear constraints.
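A typical usage sketch (the specific values are only an illustration; parameter meanings are described below):
>
>     minqpsetscale(state, s);                          // scaling is essential for this solver
>     minqpsetalgodenseaul(state, 1.0e-9, 1.0e+3, 0);   // EpsX=1e-9, Rho=1000, automatic outer iteration count
>     minqpoptimize(state);
>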
INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0, stopping criteria for inner optimizer. Inner iterations are stopped when step length (with variable scaling being applied) is less than EpsX. See minqpsetscale() for more information on variable scaling. Rho - penalty coefficient, Rho>0: * large enough that algorithm converges with desired precision. * not TOO large to prevent ill-conditioning * recommended values are 100, 1000 or 10000 ItsCnt - number of outer iterations: * recommended values: 10-15 (although in most cases it converges within 5 iterations, you may need a few more to be sure). * ItsCnt=0 means that small number of outer iterations is automatically chosen (10 iterations in current version). * ItsCnt=1 means that AUL algorithm performs just as usual penalty method. * ItsCnt>1 means that AUL algorithm performs specified number of outer iterations IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic step length selection (specific step length chosen may change in the future versions of ALGLIB, so it is better to specify step length explicitly). -- ALGLIB -- Copyright 20.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetalgodenseaul(const_cast(state.c_ptr()), epsx, rho, itscnt, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells QP solver to use DENSE-IPM QP algorithm and sets stopping criteria for the algorithm. This algorithm is intended for convex and semidefinite problems with moderate (up to several thousands) variable count and arbitrary number of constraints. IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL or BLEIC-QP instead. If you try to run DENSE-IPM on problem with indefinite matrix (matrix having at least one negative eigenvalue) then depending on circumstances it may either (a) stall at some arbitrary point, or (b) throw exception on failure of Cholesky decomposition. ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints ALGORITHM OUTLINE: * this algorithm is an implementation of interior point method as formulated by R.J.Vanderbei, with minor modifications to the algorithm (damped Newton directions are extensively used) * like all interior point methods, this algorithm tends to converge in roughly same number of iterations (between 15 and 30) independently from the problem dimensionality ALGORITHM LIMITATIONS: * because dense Cholesky driver is used, for N-dimensional problem with M dense constaints this algorithm has O(N^2+N*M) memory requirements and O(N^3+N*M^2) running time. 
Having sparse constraints with Z nonzeros per row relaxes storage and running time down to O(N^2+M*Z) and O(N^3+N*Z^2) From the practical point of view, it limits its applicability by several thousands of variables. From the other side, variables count is the most limiting factor, and dependence on constraint count is much more lower. Assuming that constraint matrix is sparse, it may handle tens of thousands of general linear constraints. INPUT PARAMETERS: State - structure which stores algorithm state Eps - >=0, stopping criteria. The algorithm stops when primal and dual infeasiblities as well as complementarity gap are less than Eps. IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. ===== TRACING IPM SOLVER ================================================= IPM solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'IPM' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. * 'IPM.DETAILED'- for output of points being visited and search directions This symbol also implicitly defines 'IPM'. You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. You may specify multiple symbols by separating them with commas: > > alglib::trace_file("IPM.DETAILED,PREC.F6", "path/to/trace.log") > -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetalgodenseipm(const minqpstate &state, const double eps, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetalgodenseipm(const_cast(state.c_ptr()), eps, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells QP solver to use SPARSE-IPM QP algorithm and sets stopping criteria for the algorithm. This algorithm is intended for convex and semidefinite problems with large variable and constraint count and sparse quadratic term and constraints. It is possible to have some limited set of dense linear constraints - they will be handled separately by dense BLAS - but the more dense constraints you have, the more time solver needs. IMPORTANT: internally this solver performs large and sparse (N+M)x(N+M) triangular factorization. So it expects both quadratic term and constraints to be highly sparse. However, its running time is influenced by BOTH fill factor and sparsity pattern. 
Generally we expect that no more than few nonzero elements per row are present. However different sparsity patterns may result in completely different running times even given same fill factor. In many cases this algorithm outperforms DENSE-IPM by order of magnitude. However, in some cases you may get better results with DENSE-IPM even when solving sparse task. IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL or BLEIC-QP instead. If you try to run DENSE-IPM on problem with indefinite matrix (matrix having at least one negative eigenvalue) then depending on circumstances it may either (a) stall at some arbitrary point, or (b) throw exception on failure of Cholesky decomposition. ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints * specializes on large-scale sparse problems ALGORITHM OUTLINE: * this algorithm is an implementation of interior point method as formulated by R.J.Vanderbei, with minor modifications to the algorithm (damped Newton directions are extensively used) * like all interior point methods, this algorithm tends to converge in roughly same number of iterations (between 15 and 30) independently from the problem dimensionality ALGORITHM LIMITATIONS: * this algorithm may handle moderate number of dense constraints, usually no more than a thousand of dense ones without losing its efficiency. INPUT PARAMETERS: State - structure which stores algorithm state Eps - >=0, stopping criteria. The algorithm stops when primal and dual infeasiblities as well as complementarity gap are less than Eps. IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetalgosparseipm(const minqpstate &state, const double eps, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetalgosparseipm(const_cast(state.c_ptr()), eps, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells solver to use QuickQP algorithm: special extra-fast algorithm for problems with box-only constrants. It may solve non-convex problems as long as they are bounded from below under constraints. ALGORITHM FEATURES: * several times faster than DENSE-IPM when running on box-only problem * utilizes accelerated methods for activation of constraints. * supports dense and sparse QP problems * supports ONLY box constraints; general linear constraints are NOT supported by this solver * can solve all types of problems (convex, semidefinite, nonconvex) as long as they are bounded from below under constraints. Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". In convex/semidefinite case global minimum is returned, in nonconvex case - algorithm returns one of the local minimums. 
ALGORITHM OUTLINE: * algorithm performs two kinds of iterations: constrained CG iterations and constrained Newton iterations * initially it performs small number of constrained CG iterations, which can efficiently activate/deactivate multiple constraints * after CG phase algorithm tries to calculate Cholesky decomposition and to perform several constrained Newton steps. If Cholesky decomposition failed (matrix is indefinite even under constraints), we perform more CG iterations until we converge to such set of constraints that system matrix becomes positive definite. Constrained Newton steps greatly increase convergence speed and precision. * algorithm interleaves CG and Newton iterations which allows to handle indefinite matrices (CG phase) and quickly converge after final set of constraints is found (Newton phase). Combination of CG and Newton phases is called "outer iteration". * it is possible to turn off Newton phase (beneficial for semidefinite problems - Cholesky decomposition will fail too often) ALGORITHM LIMITATIONS: * algorithm does not support general linear constraints; only box ones are supported * Cholesky decomposition for sparse problems is performed with Skyline Cholesky solver, which is intended for low-profile matrices. No profile- reducing reordering of variables is performed in this version of ALGLIB. * problems with near-zero negative eigenvalues (or exacty zero ones) may experience about 2-3x performance penalty. The reason is that Cholesky decomposition can not be performed until we identify directions of zero and negative curvature and activate corresponding boundary constraints - but we need a lot of trial and errors because these directions are hard to notice in the matrix spectrum. In this case you may turn off Newton phase of algorithm. Large negative eigenvalues are not an issue, so highly non-convex problems can be solved very efficiently. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|=0 The subroutine finishes its work if exploratory steepest descent step on k+1-th iteration satisfies following condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} EpsX - >=0 The subroutine finishes its work if exploratory steepest descent step on k+1-th iteration satisfies following condition: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinQPSetScale() MaxOuterIts-maximum number of OUTER iterations. One outer iteration includes some amount of CG iterations (from 5 to ~N) and one or several (usually small amount) Newton steps. Thus, one outer iteration has high cost, but can greatly reduce funcation value. Use 0 if you do not want to limit number of outer iterations. UseNewton- use Newton phase or not: * Newton phase improves performance of positive definite dense problems (about 2 times improvement can be observed) * can result in some performance penalty on semidefinite or slightly negative definite problems - each Newton phase will bring no improvement (Cholesky failure), but still will require computational time. * if you doubt, you can turn off this phase - optimizer will retain its most of its high speed. IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT! 
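A usage sketch (illustrative values; see the parameter descriptions above):
>
>     minqpsetscale(state, s);
>     minqpsetalgoquickqp(state, 0.0, 0.0, 1.0e-9, 0, true);   // stop on small scaled step, unlimited outer iterations, Newton phase enabled
>     minqpoptimize(state);
>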
Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (presently it is small step length, but it may change in the future versions of ALGLIB). -- ALGLIB -- Copyright 22.05.2014 by Bochkanov Sergey *************************************************************************/ void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetalgoquickqp(const_cast(state.c_ptr()), epsg, epsf, epsx, maxouterits, usenewton, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for QP solver Box constraints are inactive by default (after initial creation). After being set, they are preserved until explicitly overwritten with another minqpsetbc() or minqpsetbcall() call, or partially overwritten with minqpsetbci() call. Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF (latter is recommended because it will allow solver to use better algorithm). BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF (latter is recommended because it will allow solver to use better algorithm). NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: if constraints for all variables are same you may use minqpsetbcall() which allows to specify constraints without using arrays. NOTE: BndL>BndU will result in QP problem being recognized as infeasible. 
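For example, a sketch constraining x[0]>=0 and x[1]<=1 while leaving the opposite sides unbounded (using alglib::fp_posinf and alglib::fp_neginf as recommended above):
>
>     real_1d_array bndl, bndu;
>     bndl.setlength(2);
>     bndu.setlength(2);
>     bndl[0] = 0.0;               bndu[0] = alglib::fp_posinf;   // 0 <= x[0]
>     bndl[1] = alglib::fp_neginf; bndu[1] = 1.0;                 // x[1] <= 1
>     minqpsetbc(state, bndl, bndu);
>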
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for QP solver (all variables at once, same constraints for all variables) Box constraints are inactive by default (after initial creation). After being set, they are preserved until explicitly overwritten with another minqpsetbc() or minqpsetbcall() call, or partially overwritten with minqpsetbci() call. Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd BndL=BndU lower bound BndL<=x[i] BndU=+INF upper bound x[i]<=BndU BndL=-INF range BndL<=x[i]<=BndU ... free variable - BndL=-INF, BndU+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bound, same for all variables BndU - upper bound, same for all variables NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: BndL>BndU will result in QP problem being recognized as infeasible. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetbcall(const minqpstate &state, const double bndl, const double bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetbcall(const_cast(state.c_ptr()), bndl, bndu, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for I-th variable (other variables are not modified). Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd BndL=BndU lower bound BndL<=x[i] BndU=+INF upper bound x[i]<=BndU BndL=-INF range BndL<=x[i]<=BndU ... 
free variable - BndL=-INF, BndU+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bound BndU - upper bound NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: BndL>BndU will result in QP problem being recognized as infeasible. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetbci(const minqpstate &state, const ae_int_t i, const double bndl, const double bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetbci(const_cast(state.c_ptr()), i, bndl, bndu, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets dense linear constraints for QP optimizer. This function overrides results of previous calls to minqpsetlc(), minqpsetlcsparse() and minqpsetlcmixed(). After call to this function all non-box constraints are dropped, and you have only those constraints which were specified in the present call. If you want to specify mixed (with dense and sparse terms) linear constraints, you should call minqpsetlcmixed(). INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). 
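For example, a sketch specifying x0+x1<=2 and x0-x1>=0 for a two-variable problem (the overload without K, shown below, infers K from the array sizes):
>
>     real_2d_array c = "[[1,1,2],[1,-1,0]]";   // last column is the right part
>     integer_1d_array ct = "[-1,1]";           // -1 means "<=", +1 means ">="
>     minqpsetlc(state, c, ct);
>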
-- ALGLIB -- Copyright 19.06.2012 by Bochkanov Sergey *************************************************************************/ void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets dense linear constraints for QP optimizer. This function overrides results of previous calls to minqpsetlc(), minqpsetlcsparse() and minqpsetlcmixed(). After call to this function all non-box constraints are dropped, and you have only those constraints which were specified in the present call. If you want to specify mixed (with dense and sparse terms) linear constraints, you should call minqpsetlcmixed(). INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). -- ALGLIB -- Copyright 19.06.2012 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (c.rows()!=ct.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minqpsetlc': looks like one of arguments has wrong size"); k = c.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets sparse linear constraints for QP optimizer. This function overrides results of previous calls to minqpsetlc(), minqpsetlcsparse() and minqpsetlcmixed(). 
After call to this function all non-box constraints are dropped, and you have only those constraints which were specified in the present call. If you want to specify mixed (with dense and sparse terms) linear constraints, you should call minqpsetlcmixed(). INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. C - linear constraints, sparse matrix with dimensions at least [K,N+1]. If matrix has larger size, only leading Kx(N+1) rectangle is used. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0 NOTE 1: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). -- ALGLIB -- Copyright 22.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlcsparse(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets mixed linear constraints, which include a set of dense rows, and a set of sparse rows. This function overrides results of previous calls to minqpsetlc(), minqpsetlcsparse() and minqpsetlcmixed(). This function may be useful if constraint matrix includes large number of both types of rows - dense and sparse. If you have just a few sparse rows, you may represent them in dense format without loosing performance. Similarly, if you have just a few dense rows, you may store them in sparse format with almost same performance. INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. SparseC - linear constraints, sparse matrix with dimensions EXACTLY EQUAL TO [SparseK,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. SparseCT- type of sparse constraints, array[K]: * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1] * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x = SparseC[i,n+1] * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1] SparseK - number of sparse equality/inequality constraints, K>=0 DenseC - dense linear constraints, array[K,N+1]. 
Each row of DenseC represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of DenseC (including right part) must be finite. DenseCT - type of constraints, array[K]: * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1] * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x = DenseC[i,n+1] * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1] DenseK - number of equality/inequality constraints, DenseK>=0 NOTE 1: linear (non-box) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). NOTE 2: due to backward compatibility reasons SparseC can be larger than [SparseK,N+1]. In this case only leading [SparseK,N+1] submatrix will be used. However, the rest of ALGLIB has more strict requirements on the input size, so we recommend you to pass sparse term whose size exactly matches algorithm expectations. -- ALGLIB -- Copyright 22.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetlcmixed(const minqpstate &state, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlcmixed(const_cast(state.c_ptr()), const_cast(sparsec.c_ptr()), const_cast(sparsect.c_ptr()), sparsek, const_cast(densec.c_ptr()), const_cast(densect.c_ptr()), densek, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides legacy API for specification of mixed dense/sparse linear constraints. New conventions used by ALGLIB since release 3.16.0 state that set of sparse constraints comes first, followed by set of dense ones. This convention is essential when you talk about things like order of Lagrange multipliers. However, legacy API accepted mixed constraints in reverse order. This function is here to simplify situation with code relying on legacy API. It simply accepts constraints in one order (old) and passes them to new API, now in correct order. 
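In other words (a sketch; densec/densect/densek and sparsec/sparsect/sparsek stand for previously prepared constraint data):
>
>     // legacy order: dense part first, then sparse part...
>     minqpsetlcmixedlegacy(state, densec, densect, densek, sparsec, sparsect, sparsek);
>     // ...is equivalent to the new API with the sparse part first:
>     minqpsetlcmixed(state, sparsec, sparsect, sparsek, densec, densect, densek);
>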
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlcmixedlegacy(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlcmixedlegacy(const_cast(state.c_ptr()), const_cast(densec.c_ptr()), const_cast(densect.c_ptr()), densek, const_cast(sparsec.c_ptr()), const_cast(sparsect.c_ptr()), sparsek, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with dense constraint matrix A. NOTE: knowing that constraint matrix is dense helps some QP solvers (especially modern IPM method) to utilize efficient dense Level 3 BLAS for dense parts of the problem. If your problem has both dense and sparse constraints, you can use minqpsetlc2mixed() function, which will result in dense algebra being applied to dense terms, and sparse sparse linear algebra applied to sparse terms. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - linear constraints, array[K,N]. Each row of A represents one constraint. One-sided inequality constraints, two- sided inequality constraints, equality constraints are supported (see below) AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A, AL, AU. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2dense(const minqpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(al.c_ptr()), const_cast(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with dense constraint matrix A. 
NOTE: knowing that constraint matrix is dense helps some QP solvers (especially modern IPM method) to utilize efficient dense Level 3 BLAS for dense parts of the problem. If your problem has both dense and sparse constraints, you can use minqpsetlc2mixed() function, which will result in dense algebra being applied to dense terms, and sparse sparse linear algebra applied to sparse terms. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - linear constraints, array[K,N]. Each row of A represents one constraint. One-sided inequality constraints, two- sided inequality constraints, equality constraints are supported (see below) AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A, AL, AU. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minqpsetlc2dense(const minqpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (a.rows()!=al.length()) || (a.rows()!=au.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minqpsetlc2dense': looks like one of arguments has wrong size"); k = a.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(al.c_ptr()), const_cast(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with sparse constraining matrix A. Recommended for large-scale problems. This function overwrites linear (non-box) constraints set by previous calls (if such calls were made). INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - sparse matrix with size [K,N] (exactly!). Each row of A represents one general linear constraint. A can be stored in any sparse storage format. AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0. If K=0 is specified, A, AL, AU are ignored. 
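For example, a sketch adding the two-sided constraints 1<=x0+x1<=2 and 0<=x1<=5 for N=2 (it assumes the sparsecreate()/sparseset() helpers of ALGLIB's sparsematrix interface):
>
>     sparsematrix a;
>     sparsecreate(2, 2, a);       // K=2 rows, N=2 columns
>     sparseset(a, 0, 0, 1.0);     // row 0: x0+x1
>     sparseset(a, 0, 1, 1.0);
>     sparseset(a, 1, 1, 1.0);     // row 1: x1
>     real_1d_array al = "[1.0,0.0]";
>     real_1d_array au = "[2.0,5.0]";
>     minqpsetlc2(state, a, al, au, 2);
>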
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2(const minqpstate &state, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc2(const_cast<alglib_impl::minqpstate*>(state.c_ptr()), const_cast<alglib_impl::sparsematrix*>(a.c_ptr()), const_cast<alglib_impl::ae_vector*>(al.c_ptr()), const_cast<alglib_impl::ae_vector*>(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with mixed constraining matrix A including sparse part (first SparseK rows) and dense part (last DenseK rows). Recommended for large-scale problems. This function overwrites linear (non-box) constraints set by previous calls (if such calls were made). This function may be useful if constraint matrix includes a large number of both types of rows - dense and sparse. If you have just a few sparse rows, you may represent them in dense format without losing performance. Similarly, if you have just a few dense rows, you may store them in sparse format with almost the same performance. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. SparseA - sparse matrix with size [K,N] (exactly!). Each row of A represents one general linear constraint. A can be stored in any sparse storage format. SparseK - number of sparse constraints, SparseK>=0 DenseA - linear constraints, array[K,N], set of dense constraints. Each row of A represents one general linear constraint. DenseK - number of dense constraints, DenseK>=0 AL, AU - lower and upper bounds, array[SparseK+DenseK], with former SparseK elements corresponding to sparse constraints, and latter DenseK elements corresponding to dense constraints; * AL[i]=AU[i] => equality constraint Ai*x * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0. If K=0 is specified, A, AL, AU are ignored.
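USAGE SKETCH (illustrative only; sizes and coefficients are assumptions, and State is assumed to have been created with minqpcreate(3,state) beforehand): one sparse row x0<=4 followed by one dense row x0+x1+x2=1, with AL/AU ordered sparse-first:

    alglib::sparsematrix sa;
    alglib::sparsecreate(1, 3, 0, sa);
    alglib::sparseset(sa, 0, 0, 1.0);               // sparse row:   x0 <= 4
    alglib::real_2d_array da = "[[1,1,1]]";         // dense row:    x0+x1+x2 = 1
    alglib::real_1d_array al, au;
    al.setlength(2);
    au.setlength(2);
    al[0] = alglib::fp_neginf;  au[0] = 4.0;        // bounds for the sparse row
    al[1] = 1.0;                au[1] = 1.0;        // bounds for the dense row
    alglib::minqpsetlc2mixed(state, sa, 1, da, 1, al, au);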
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2mixed(const minqpstate &state, const sparsematrix &sparsea, const ae_int_t ksparse, const real_2d_array &densea, const ae_int_t kdense, const real_1d_array &al, const real_1d_array &au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpsetlc2mixed(const_cast(state.c_ptr()), const_cast(sparsea.c_ptr()), ksparse, const_cast(densea.c_ptr()), kdense, const_cast(al.c_ptr()), const_cast(au.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present dense constraints. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - linear constraint coefficient, array[N], right side is NOT included. AL, AU - lower and upper bounds; * AL=AU => equality constraint Ai*x * AL two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint Ai*x<=AU * AU=+INF => one-sided constraint AL<=Ai*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minqpaddlc2dense(const minqpstate &state, const real_1d_array &a, const double al, const double au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpaddlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), al, au, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present sparse constraints. Constraint is passed in compressed format: as list of non-zero entries of coefficient vector A. Such approach is more efficient than dense storage for highly sparse constraint vectors. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. 
IdxA - array[NNZ], indexes of non-zero elements of A: * can be unsorted * can include duplicate indexes (corresponding entries of ValA[] will be summed) ValA - array[NNZ], values of non-zero elements of A NNZ - number of non-zero coefficients in A AL, AU - lower and upper bounds; * AL=AU => equality constraint A*x * AL two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint A*x<=AU * AU=+INF => one-sided constraint AL<=A*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minqpaddlc2(const minqpstate &state, const integer_1d_array &idxa, const real_1d_array &vala, const ae_int_t nnz, const double al, const double au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpaddlc2(const_cast(state.c_ptr()), const_cast(idxa.c_ptr()), const_cast(vala.c_ptr()), nnz, al, au, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function solves quadratic programming problem. Prior to calling this function you should choose solver by means of one of the following functions: * minqpsetalgoquickqp() - for QuickQP solver * minqpsetalgobleic() - for BLEIC-QP solver * minqpsetalgodenseaul() - for Dense-AUL-QP solver * minqpsetalgodenseipm() - for Dense-IPM-QP solver These functions also allow you to control stopping criteria of the solver. If you did not set solver, MinQP subpackage will automatically select solver for your problem and will run it with default stopping criteria. However, it is better to set explicitly solver and its stopping criteria. INPUT PARAMETERS: State - algorithm state You should use MinQPResults() function to access results after calls to this function. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey. Special thanks to Elvira Illarionova for important suggestions on the linearly constrained QP algorithm. *************************************************************************/ void minqpoptimize(const minqpstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpoptimize(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* QP solver results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution. This array is allocated and initialized only when Rep.TerminationType parameter is positive (success). 
Rep - optimization report, contains: * completion code in Rep.TerminationType (positive values denote some kind of success, negative - failures) * Lagrange multipliers - for QP solvers which support them * other statistics See comments on minqpreport structure for more information -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpresults(const_cast<alglib_impl::minqpstate*>(state.c_ptr()), const_cast<alglib_impl::ae_vector*>(x.c_ptr()), const_cast<alglib_impl::minqpreport*>(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* QP results Buffered implementation of MinQPResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minqpresultsbuf(const_cast<alglib_impl::minqpstate*>(state.c_ptr()), const_cast<alglib_impl::ae_vector*>(x.c_ptr()), const_cast<alglib_impl::minqpreport*>(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores linear solver state.
You should use functions provided by MinLP subpackage to work with this object *************************************************************************/ _minlpstate_owner::_minlpstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlpstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlpstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlpstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlpstate)); alglib_impl::_minlpstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlpstate_owner::_minlpstate_owner(const _minlpstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlpstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlpstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlpstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlpstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlpstate)); alglib_impl::_minlpstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlpstate_owner& _minlpstate_owner::operator=(const _minlpstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlpstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlpstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minlpstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlpstate)); alglib_impl::_minlpstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlpstate_owner::~_minlpstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minlpstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlpstate* _minlpstate_owner::c_ptr() { return p_struct; } alglib_impl::minlpstate* _minlpstate_owner::c_ptr() const { return const_cast(p_struct); } minlpstate::minlpstate() : _minlpstate_owner() { } minlpstate::minlpstate(const minlpstate &rhs):_minlpstate_owner(rhs) { } minlpstate& minlpstate::operator=(const minlpstate &rhs) { if( this==&rhs ) return *this; _minlpstate_owner::operator=(rhs); return *this; } minlpstate::~minlpstate() { } /************************************************************************* This structure stores optimization report: * f target function value * y dual variables * stats array[N+M], statuses of box (N) and linear (M) 
constraints: * stats[i]>0 => constraint at upper bound (also used for free non-basic variables set to zero) * stats[i]<0 => constraint at lower bound * stats[i]=0 => constraint is inactive, basic variable * primalerror primal feasibility error * dualerror dual feasibility error * iterationscount iteration count * terminationtype completion code (see below) Completion codes: * -4 LP problem is primal unbounded (dual infeasible) * -3 LP problem is primal infeasible (dual unbounded) * 1..4 successful completion * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. *************************************************************************/ _minlpreport_owner::_minlpreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlpreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlpreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlpreport)); alglib_impl::_minlpreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlpreport_owner::_minlpreport_owner(const _minlpreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlpreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlpreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlpreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlpreport)); alglib_impl::_minlpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlpreport_owner& _minlpreport_owner::operator=(const _minlpreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlpreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlpreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minlpreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlpreport)); alglib_impl::_minlpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlpreport_owner::~_minlpreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minlpreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlpreport* _minlpreport_owner::c_ptr() { return p_struct; } 
alglib_impl::minlpreport* _minlpreport_owner::c_ptr() const { return const_cast(p_struct); } minlpreport::minlpreport() : _minlpreport_owner() ,f(p_struct->f),y(&p_struct->y),stats(&p_struct->stats),primalerror(p_struct->primalerror),dualerror(p_struct->dualerror),iterationscount(p_struct->iterationscount),terminationtype(p_struct->terminationtype) { } minlpreport::minlpreport(const minlpreport &rhs):_minlpreport_owner(rhs) ,f(p_struct->f),y(&p_struct->y),stats(&p_struct->stats),primalerror(p_struct->primalerror),dualerror(p_struct->dualerror),iterationscount(p_struct->iterationscount),terminationtype(p_struct->terminationtype) { } minlpreport& minlpreport::operator=(const minlpreport &rhs) { if( this==&rhs ) return *this; _minlpreport_owner::operator=(rhs); return *this; } minlpreport::~minlpreport() { } /************************************************************************* LINEAR PROGRAMMING The subroutine creates LP solver. After initial creation it contains default optimization problem with zero cost vector and all variables being fixed to zero values and no constraints. In order to actually solve something you should: * set cost vector with minlpsetcost() * set variable bounds with minlpsetbc() or minlpsetbcall() * specify constraint matrix with one of the following functions: [*] minlpsetlc() for dense one-sided constraints [*] minlpsetlc2dense() for dense two-sided constraints [*] minlpsetlc2() for sparse two-sided constraints [*] minlpaddlc2dense() to add one dense row to constraint matrix [*] minlpaddlc2() to add one row to constraint matrix (compressed format) * call minlpoptimize() to run the solver and minlpresults() to get the solution vector and additional information. Presently this optimizer supports only revised simplex method as underlying solver. DSE pricing and bounds flipping ratio test (aka long dual step) are supported. Large-scale sparse LU solver with Forest-Tomlin is used internally as linear algebra driver. Future releases of ALGLIB may introduce other solvers. INPUT PARAMETERS: N - problem size OUTPUT PARAMETERS: State - optimizer in the default state -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpcreate(const ae_int_t n, minlpstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpcreate(n, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets cost term for LP solver. By default, cost term is zero. INPUT PARAMETERS: State - structure which stores algorithm state C - cost term, array[N]. 
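USAGE SKETCH (illustrative only; the two-variable problem and all coefficients are assumptions): setting the cost term in the context of the overall workflow outlined for minlpcreate() above:

    alglib::minlpstate state;
    alglib::minlpreport rep;
    alglib::real_1d_array x;
    alglib::minlpcreate(2, state);                  // two variables
    alglib::real_1d_array c = "[-1,-1]";
    alglib::minlpsetcost(state, c);                 // minimize -x0-x1
    alglib::minlpsetbcall(state, 0.0, 10.0);        // 0 <= x[i] <= 10 for all variables
    alglib::real_1d_array a = "[1,1]";
    alglib::minlpaddlc2dense(state, a, alglib::fp_neginf, 4.0);   // x0+x1 <= 4
    alglib::minlpoptimize(state);
    alglib::minlpresults(state, x, rep);            // check rep.terminationtype>0 before using x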
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetcost(const minlpstate &state, const real_1d_array &c, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetcost(const_cast(state.c_ptr()), const_cast(c.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients. ALGLIB optimizers use scaling matrices to test stopping conditions and as preconditioner. Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetscale(const minlpstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for LP solver (all variables at once, different constraints for different variables). The default state of constraints is to have all variables fixed at zero. You have to overwrite it by your own constraint vector. Constraint status is preserved until constraints are explicitly overwritten with another minlpsetbc() call, overwritten with minlpsetbcall(), or partially overwritten with minlmsetbci() call. Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. BndU - upper bounds, array[N]. NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: if constraints for all variables are same you may use minlpsetbcall() which allows to specify constraints without using arrays. 
NOTE: BndL>BndU will result in LP problem being recognized as infeasible. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetbc(const minlpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for LP solver (all variables at once, same constraints for all variables) The default state of constraints is to have all variables fixed at zero. You have to overwrite it by your own constraint vector. Constraint status is preserved until constraints are explicitly overwritten with another minlpsetbc() call or partially overwritten with minlpsetbcall(). Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bound, same for all variables BndU - upper bound, same for all variables NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: minlpsetbc() can be used to specify different constraints for different variables. NOTE: BndL>BndU will result in LP problem being recognized as infeasible. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetbcall(const minlpstate &state, const double bndl, const double bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetbcall(const_cast(state.c_ptr()), bndl, bndu, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets box constraints for I-th variable (other variables are not modified). The default state of constraints is to have all variables fixed at zero. You have to overwrite it by your own constraint vector. 
Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]+INF INPUT PARAMETERS: State - structure stores algorithm state I - variable index, in [0,N) BndL - lower bound for I-th variable BndU - upper bound for I-th variable NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: minlpsetbc() can be used to specify different constraints for different variables. NOTE: BndL>BndU will result in LP problem being recognized as infeasible. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetbci(const minlpstate &state, const ae_int_t i, const double bndl, const double bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetbci(const_cast(state.c_ptr()), i, bndl, bndu, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets one-sided linear constraints A*x ~ AU, where "~" can be a mix of "<=", "=" and ">=". IMPORTANT: this function is provided here for compatibility with the rest of ALGLIB optimizers which accept constraints in format like this one. Many real-life problems feature two-sided constraints like a0 <= a*x <= a1. It is really inefficient to add them as a pair of one-sided constraints. Use minlpsetlc2dense(), minlpsetlc2(), minlpaddlc2() (or its sparse version) wherever possible. INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - linear constraints, array[K,N+1]. Each row of A represents one constraint, with first N elements being linear coefficients, and last element being right side. CT - constraint types, array[K]: * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n] * if CT[i]=0, then I-th constraint is A[i,*]*x = A[i,n] * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n] K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A and CT. 
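USAGE SKETCH (illustrative only; coefficients are assumptions, and State is assumed to have been created with minlpcreate(2,state)): the two-sided functions mentioned above are preferred, but the one-sided format looks as follows:

    // constraints:  x0+x1 >= 1   and   x0-x1 <= 2
    alglib::real_2d_array a = "[[1,1,1],[1,-1,2]]"; // last column is the right side
    alglib::integer_1d_array ct = "[1,-1]";         // +1 means ">=", -1 means "<="
    alglib::minlpsetlc(state, a, ct, 2);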
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetlc(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets one-sided linear constraints A*x ~ AU, where "~" can be a mix of "<=", "=" and ">=". IMPORTANT: this function is provided here for compatibility with the rest of ALGLIB optimizers which accept constraints in format like this one. Many real-life problems feature two-sided constraints like a0 <= a*x <= a1. It is really inefficient to add them as a pair of one-sided constraints. Use minlpsetlc2dense(), minlpsetlc2(), minlpaddlc2() (or its sparse version) wherever possible. INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - linear constraints, array[K,N+1]. Each row of A represents one constraint, with first N elements being linear coefficients, and last element being right side. CT - constraint types, array[K]: * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n] * if CT[i]=0, then I-th constraint is A[i,*]*x = A[i,n] * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n] K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A and CT. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (a.rows()!=ct.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minlpsetlc': looks like one of arguments has wrong size"); k = a.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetlc(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU. This version accepts dense matrix as input; internally LP solver uses sparse storage anyway (most LP problems are sparse), but for your convenience it may accept dense inputs. This function overwrites linear constraints set by previous calls (if such calls were made). We recommend you to use sparse version of this function unless you solve small-scale LP problem (less than few hundreds of variables). 
NOTE: there also exist several versions of this function: * one-sided dense version which accepts constraints in the same format as one used by QP and NLP solvers * two-sided sparse version which accepts sparse matrix * two-sided dense version which allows you to add constraints row by row * two-sided sparse version which allows you to add constraints row by row INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - linear constraints, array[K,N]. Each row of A represents one constraint. One-sided inequality constraints, two- sided inequality constraints, equality constraints are supported (see below) AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A, AL, AU. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(al.c_ptr()), const_cast(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU. This version accepts dense matrix as input; internally LP solver uses sparse storage anyway (most LP problems are sparse), but for your convenience it may accept dense inputs. This function overwrites linear constraints set by previous calls (if such calls were made). We recommend you to use sparse version of this function unless you solve small-scale LP problem (less than few hundreds of variables). NOTE: there also exist several versions of this function: * one-sided dense version which accepts constraints in the same format as one used by QP and NLP solvers * two-sided sparse version which accepts sparse matrix * two-sided dense version which allows you to add constraints row by row * two-sided sparse version which allows you to add constraints row by row INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - linear constraints, array[K,N]. Each row of A represents one constraint. One-sided inequality constraints, two- sided inequality constraints, equality constraints are supported (see below) AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A, AL, AU. 
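USAGE SKETCH (illustrative only; coefficients are assumptions, and State is assumed to have been created with minlpcreate(2,state)):

    // 1 <= x0+x1 <= 3   and   x0-x1 = 0
    alglib::real_2d_array a  = "[[1,1],[1,-1]]";
    alglib::real_1d_array al = "[1,0]";
    alglib::real_1d_array au = "[3,0]";
    alglib::minlpsetlc2dense(state, a, al, au, 2);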
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (a.rows()!=al.length()) || (a.rows()!=au.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minlpsetlc2dense': looks like one of arguments has wrong size"); k = a.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(al.c_ptr()), const_cast(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with sparse constraining matrix A. Recommended for large-scale problems. This function overwrites linear (non-box) constraints set by previous calls (if such calls were made). INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - sparse matrix with size [K,N] (exactly!). Each row of A represents one general linear constraint. A can be stored in any sparse storage format. AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0. If K=0 is specified, A, AL, AU are ignored. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetlc2(const minlpstate &state, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpsetlc2(const_cast(state.c_ptr()), const_cast(a.c_ptr()), const_cast(al.c_ptr()), const_cast(au.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present constraints. This version accepts dense constraint vector as input, but sparsifies it for internal storage and processing. Thus, time to add one constraint in is O(N) - we have to scan entire array of length N. Sparse version of this function is order of magnitude faster for constraints with just a few nonzeros per row. INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. A - linear constraint coefficient, array[N], right side is NOT included. 
AL, AU - lower and upper bounds; * AL=AU => equality constraint Ai*x * AL two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint Ai*x<=AU * AU=+INF => one-sided constraint AL<=Ai*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpaddlc2dense(const minlpstate &state, const real_1d_array &a, const double al, const double au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpaddlc2dense(const_cast(state.c_ptr()), const_cast(a.c_ptr()), al, au, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present constraints. Constraint is passed in compressed format: as list of non-zero entries of coefficient vector A. Such approach is more efficient than dense storage for highly sparse constraint vectors. INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. IdxA - array[NNZ], indexes of non-zero elements of A: * can be unsorted * can include duplicate indexes (corresponding entries of ValA[] will be summed) ValA - array[NNZ], values of non-zero elements of A NNZ - number of non-zero coefficients in A AL, AU - lower and upper bounds; * AL=AU => equality constraint A*x * AL two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint A*x<=AU * AU=+INF => one-sided constraint AL<=A*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpaddlc2(const minlpstate &state, const integer_1d_array &idxa, const real_1d_array &vala, const ae_int_t nnz, const double al, const double au, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpaddlc2(const_cast(state.c_ptr()), const_cast(idxa.c_ptr()), const_cast(vala.c_ptr()), nnz, al, au, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function solves LP problem. INPUT PARAMETERS: State - algorithm state You should use minlpresults() function to access results after calls to this function. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey. 
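USAGE SKETCH (illustrative only; indexes and values are assumptions, and State is assumed to describe a problem with at least 6 variables): appending one constraint in compressed format with minlpaddlc2(), documented above, before running the solver:

    alglib::integer_1d_array idx = "[0,5]";
    alglib::real_1d_array    val = "[2,3]";
    alglib::minlpaddlc2(state, idx, val, 2, alglib::fp_neginf, 7.0);  // 2*x0+3*x5 <= 7
    alglib::minlpoptimize(state);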
*************************************************************************/ void minlpoptimize(const minlpstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpoptimize(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* LP solver results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[N], solution. Filled by zeros on failure. Rep - optimization report. You should check Rep.TerminationType, which contains completion code, and you may check another fields which contain another information about algorithm functioning. Failure codes returned by algorithm are: * -4 LP problem is primal unbounded (dual infeasible) * -3 LP problem is primal infeasible (dual unbounded) Success codes: * 1..4 successful completion * 5 MaxIts steps was taken -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minlpresults(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* LP results Buffered implementation of MinLPResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. 
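USAGE SKETCH (illustrative only; n, nproblems and the re-solving loop are assumptions, and State is assumed to be a previously configured solver): reusing a pre-allocated X[] across repeated solves:

    alglib::real_1d_array x;
    x.setlength(n);                                 // pre-allocate once; n is the problem size
    alglib::minlpreport rep;
    for(int i=0; i<nproblems; i++)
    {
        // ... adjust cost/bounds/constraints of 'state' for problem i ...
        alglib::minlpoptimize(state);
        alglib::minlpresultsbuf(state, x, rep);     // reuses x, no reallocation
    }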
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minlpresultsbuf(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlpresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores nonlinear optimizer state. You should use functions provided by MinNLC subpackage to work with this object *************************************************************************/ _minnlcstate_owner::_minnlcstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnlcstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minnlcstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnlcstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnlcstate)); alglib_impl::_minnlcstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minnlcstate_owner::_minnlcstate_owner(const _minnlcstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnlcstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnlcstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minnlcstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnlcstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnlcstate)); alglib_impl::_minnlcstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minnlcstate_owner& _minnlcstate_owner::operator=(const _minnlcstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minnlcstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, 
"ALGLIB: minnlcstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minnlcstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minnlcstate)); alglib_impl::_minnlcstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minnlcstate_owner::~_minnlcstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minnlcstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minnlcstate* _minnlcstate_owner::c_ptr() { return p_struct; } alglib_impl::minnlcstate* _minnlcstate_owner::c_ptr() const { return const_cast(p_struct); } minnlcstate::minnlcstate() : _minnlcstate_owner() ,needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),j(&p_struct->j),x(&p_struct->x) { } minnlcstate::minnlcstate(const minnlcstate &rhs):_minnlcstate_owner(rhs) ,needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),j(&p_struct->j),x(&p_struct->x) { } minnlcstate& minnlcstate::operator=(const minnlcstate &rhs) { if( this==&rhs ) return *this; _minnlcstate_owner::operator=(rhs); return *this; } minnlcstate::~minnlcstate() { } /************************************************************************* These fields store optimization report: * iterationscount total number of inner iterations * nfev number of gradient evaluations * terminationtype termination type (see below) Scaled constraint violations are reported: * bcerr maximum violation of the box constraints * bcidx index of the most violated box constraint (or -1, if all box constraints are satisfied or there is no box constraint) * lcerr maximum violation of the linear constraints, computed as maximum scaled distance between final point and constraint boundary. * lcidx index of the most violated linear constraint (or -1, if all constraints are satisfied or there is no general linear constraints) * nlcerr maximum violation of the nonlinear constraints * nlcidx index of the most violated nonlinear constraint (or -1, if all constraints are satisfied or there is no nonlinear constraints) Violations of box constraints are scaled on per-component basis according to the scale vector s[] as specified by minnlcsetscale(). Violations of the general linear constraints are also computed using user-supplied variable scaling. Violations of nonlinear constraints are computed "as is" TERMINATION CODES TerminationType field contains completion code, which can be either: === FAILURE CODE === -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signaled. -3 box constraints are infeasible. Note: infeasibility of non-box constraints does NOT trigger emergency completion; you have to examine bcerr/lcerr/nlcerr to detect possibly inconsistent constraints. === SUCCESS CODE === 2 relative step is no more than EpsX. 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 user requested algorithm termination via minnlcrequesttermination(), last accepted point is returned Other fields of this structure are not documented and should not be used! 
*************************************************************************/ _minnlcreport_owner::_minnlcreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnlcreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minnlcreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnlcreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnlcreport)); alglib_impl::_minnlcreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minnlcreport_owner::_minnlcreport_owner(const _minnlcreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnlcreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnlcreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minnlcreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnlcreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnlcreport)); alglib_impl::_minnlcreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minnlcreport_owner& _minnlcreport_owner::operator=(const _minnlcreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minnlcreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnlcreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minnlcreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minnlcreport)); alglib_impl::_minnlcreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minnlcreport_owner::~_minnlcreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minnlcreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minnlcreport* _minnlcreport_owner::c_ptr() { return p_struct; } alglib_impl::minnlcreport* _minnlcreport_owner::c_ptr() const { return const_cast(p_struct); } minnlcreport::minnlcreport() : _minnlcreport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype),bcerr(p_struct->bcerr),bcidx(p_struct->bcidx),lcerr(p_struct->lcerr),lcidx(p_struct->lcidx),nlcerr(p_struct->nlcerr),nlcidx(p_struct->nlcidx),dbgphase0its(p_struct->dbgphase0its) { } minnlcreport::minnlcreport(const minnlcreport &rhs):_minnlcreport_owner(rhs) 
,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype),bcerr(p_struct->bcerr),bcidx(p_struct->bcidx),lcerr(p_struct->lcerr),lcidx(p_struct->lcidx),nlcerr(p_struct->nlcerr),nlcidx(p_struct->nlcidx),dbgphase0its(p_struct->dbgphase0its)
{
}

minnlcreport& minnlcreport::operator=(const minnlcreport &rhs)
{
    if( this==&rhs )
        return *this;
    _minnlcreport_owner::operator=(rhs);
    return *this;
}

minnlcreport::~minnlcreport()
{
}

/*************************************************************************
                  NONLINEARLY CONSTRAINED OPTIMIZATION
            WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM

DESCRIPTION:
The subroutine minimizes function F(x) of N arguments subject to any
combination of:
* bound constraints
* linear inequality constraints
* linear equality constraints
* nonlinear equality constraints Gi(x)=0
* nonlinear inequality constraints Hi(x)<=0

REQUIREMENTS:
* user must provide function value and gradient for F(), H(), G()
* starting point X0 must be feasible or not too far away from the feasible set
* F(), G(), H() are continuously differentiable on the feasible set and its
  neighborhood
* nonlinear constraints G() and H() must have non-zero gradient at G(x)=0
  and at H(x)=0. Say, a constraint like x^2>=1 is supported, but x^2>=0 is
  NOT supported.

USAGE:

Constrained optimization is far more complex than the unconstrained one.
Nonlinearly constrained optimization is one of the most esoteric numerical
procedures. Here we give a very brief outline of the MinNLC optimizer. We
strongly recommend you to study examples in the ALGLIB Reference Manual
and to read the ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/

1. User initializes algorithm state with MinNLCCreate() call and chooses
   which NLC solver to use. There is some solver which is used by default,
   with default settings, but you should NOT rely on the default choice. It
   may change in future releases of ALGLIB without notice, and no one can
   guarantee that the new solver will be able to solve your problem with
   default settings. On the other hand, if you choose a solver explicitly,
   you can be pretty sure that it will work with new ALGLIB releases.

   In the current release the following solvers can be used:
   * SQP solver, recommended for medium-scale problems (fewer than a
     thousand variables) with hard-to-evaluate target functions. Requires
     fewer function evaluations than other solvers, but each step involves
     solution of a QP subproblem, so running time may be higher than that
     of AUL (another recommended option). Activated with the
     minnlcsetalgosqp() function.
   * AUL solver with dense preconditioner, recommended for large-scale
     problems or for problems with a cheap target function. Needs more
     function evaluations than SQP (about 5x-10x more), but its iterations
     are much cheaper than those of SQP. Activated with the
     minnlcsetalgoaul() function.
   * SLP solver, successive linear programming. The slowest one, requires
     more target function evaluations than SQP and AUL. However, it is
     somewhat more robust in tricky cases, so it can be used as a backup
     plan. Activated with the minnlcsetalgoslp() function.

2. [optional] User activates the OptGuard integrity checker which tries to
   detect possible errors in the user-supplied callbacks:
   * discontinuity/nonsmoothness of the target/nonlinear constraints
   * errors in the analytic gradient provided by user
   This feature is essential for early prototyping stages because it helps
   to catch common coding and problem statement errors.
OptGuard can be activated with following functions (one per each check performed): * minnlcoptguardsmoothness() * minnlcoptguardgradient() 3. User adds boundary and/or linear and/or nonlinear constraints by means of calling one of the following functions: a) minnlcsetbc() for boundary constraints b) minnlcsetlc() for linear constraints c) minnlcsetnlc() for nonlinear constraints You may combine (a), (b) and (c) in one optimization problem. 4. User sets scale of the variables with minnlcsetscale() function. It is VERY important to set scale of the variables, because nonlinearly constrained problems are hard to solve when variables are badly scaled. 5. User sets stopping conditions with minnlcsetcond(). If NLC solver uses inner/outer iteration layout, this function sets stopping conditions for INNER iterations. 6. Finally, user calls minnlcoptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G/H. 7. User calls minnlcresults() to get solution; additionally you can retrieve OptGuard report with minnlcoptguardresults(), and get detailed report about purported errors in the target function with: * minnlcoptguardnonc1test0results() * minnlcoptguardnonc1test1results() 8. Optionally user may call minnlcrestartfrom() to solve another problem with same N but another starting point. minnlcrestartfrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlccreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* NONLINEARLY CONSTRAINED OPTIMIZATION WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints * nonlinear equality constraints Gi(x)=0 * nonlinear inequality constraints Hi(x)<=0 REQUIREMENTS: * user must provide function value and gradient for F(), H(), G() * starting point X0 must be feasible or not too far away from the feasible set * F(), G(), H() are continuously differentiable on the feasible set and its neighborhood * nonlinear constraints G() and H() must have non-zero gradient at G(x)=0 and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is NOT supported. 
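For example (an illustrative reformulation, not something taken from this interface): a
problem with the equality constraint x0+x1=2 and the inequality constraint x0^2+x1^2<=4
fits the formulation above with G0(x)=x0+x1-2 and H0(x)=x0^2+x1^2-4. The gradient of G0 is
(1,1) everywhere, and the gradient of H0, (2*x0,2*x1), vanishes only at the origin, which
does not lie on H0(x)=0, so both constraints satisfy the non-zero gradient requirement.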
USAGE:

Constrained optimization is far more complex than the unconstrained one.
Nonlinearly constrained optimization is one of the most esoteric numerical
procedures. Here we give a very brief outline of the MinNLC optimizer. We
strongly recommend you to study examples in the ALGLIB Reference Manual
and to read the ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/

1. User initializes algorithm state with MinNLCCreate() call and chooses
   which NLC solver to use. There is some solver which is used by default,
   with default settings, but you should NOT rely on the default choice. It
   may change in future releases of ALGLIB without notice, and no one can
   guarantee that the new solver will be able to solve your problem with
   default settings. On the other hand, if you choose a solver explicitly,
   you can be pretty sure that it will work with new ALGLIB releases.

   In the current release the following solvers can be used:
   * SQP solver, recommended for medium-scale problems (fewer than a
     thousand variables) with hard-to-evaluate target functions. Requires
     fewer function evaluations than other solvers, but each step involves
     solution of a QP subproblem, so running time may be higher than that
     of AUL (another recommended option). Activated with the
     minnlcsetalgosqp() function.
   * AUL solver with dense preconditioner, recommended for large-scale
     problems or for problems with a cheap target function. Needs more
     function evaluations than SQP (about 5x-10x more), but its iterations
     are much cheaper than those of SQP. Activated with the
     minnlcsetalgoaul() function.
   * SLP solver, successive linear programming. The slowest one, requires
     more target function evaluations than SQP and AUL. However, it is
     somewhat more robust in tricky cases, so it can be used as a backup
     plan. Activated with the minnlcsetalgoslp() function.

2. [optional] User activates the OptGuard integrity checker which tries to
   detect possible errors in the user-supplied callbacks:
   * discontinuity/nonsmoothness of the target/nonlinear constraints
   * errors in the analytic gradient provided by user
   This feature is essential for early prototyping stages because it helps
   to catch common coding and problem statement errors.

   OptGuard can be activated with the following functions (one per check
   performed):
   * minnlcoptguardsmoothness()
   * minnlcoptguardgradient()

3. User adds boundary and/or linear and/or nonlinear constraints by means
   of calling one of the following functions:
   a) minnlcsetbc() for boundary constraints
   b) minnlcsetlc() for linear constraints
   c) minnlcsetnlc() for nonlinear constraints
   You may combine (a), (b) and (c) in one optimization problem.

4. User sets scale of the variables with the minnlcsetscale() function. It
   is VERY important to set the scale of the variables, because nonlinearly
   constrained problems are hard to solve when variables are badly scaled.

5. User sets stopping conditions with minnlcsetcond(). If the NLC solver
   uses an inner/outer iteration layout, this function sets stopping
   conditions for INNER iterations.

6. Finally, user calls the minnlcoptimize() function which takes the
   algorithm state and a pointer (delegate, etc.) to a callback function
   which calculates F/G/H.

7. User calls minnlcresults() to get the solution; additionally you can
   retrieve the OptGuard report with minnlcoptguardresults(), and get a
   detailed report about purported errors in the target function with:
   * minnlcoptguardnonc1test0results()
   * minnlcoptguardnonc1test1results()

8.
Optionally user may call minnlcrestartfrom() to solve another problem with same N but another starting point. minnlcrestartfrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnlccreate(const real_1d_array &x, minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlccreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This subroutine is a finite difference variant of MinNLCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinNLCCreate() in order to get more information about creation of NLC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinNLCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large TRUNCATION errors, while too small step will result in too large NUMERICAL errors. 1.0E-4 can be good value to start from. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
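As a brief sketch (the two-dimensional starting point below is illustrative; 1.0E-4 is the
moderate differentiation step suggested in note 3), creating an optimizer which
differentiates the target numerically may look like:
>
>     real_1d_array x0 = "[0.5,0.5]";
>     minnlcstate state;
>     minnlccreatef(2, x0, 1.0e-4, state);
>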
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlccreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine is a finite difference variant of MinNLCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinNLCCreate() in order to get more information about creation of NLC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size ofX X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinNLCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large TRUNCATION errors, while too small step will result in too large NUMERICAL errors. 1.0E-4 can be good value to start from. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnlccreatef(const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlccreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets boundary constraints for NLC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinNLCRestartFrom(). You may combine boundary constraints with general linear ones - and with nonlinear ones! Boundary constraints are handled more efficiently than other types. Thus, if your problem has mixed constraints, you may explicitly specify some of them as boundary and save some time/space. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: when you solve your problem with augmented Lagrangian solver, boundary constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area! -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints for MinNLC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinNLCRestartFrom(). You may combine linear constraints with boundary ones - and with nonlinear ones! If your problem has mixed constraints, you may explicitly specify some of them as linear. It may help optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. C - linear constraints, array[K,N+1]. 
Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: when you solve your problem with augmented Lagrangian solver, linear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area! -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints for MinNLC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinNLCRestartFrom(). You may combine linear constraints with boundary ones - and with nonlinear ones! If your problem has mixed constraints, you may explicitly specify some of them as linear. It may help optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: when you solve your problem with augmented Lagrangian solver, linear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area! 
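A brief sketch (made-up coefficients for a 2-dimensional problem, assuming an optimizer
already created in state) which passes one inequality constraint x0+x1<=1 and one equality
constraint x0-x1=0:
>
>     real_2d_array c = "[[1,1,1],[1,-1,0]]";
>     integer_1d_array ct = "[-1,0]";
>     minnlcsetlc(state, c, ct);
>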
  -- ALGLIB --
     Copyright 06.06.2014 by Bochkanov Sergey
*************************************************************************/
#if !defined(AE_NO_EXCEPTIONS)
void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    ae_int_t k;
    if( (c.rows()!=ct.length()))
        _ALGLIB_CPP_EXCEPTION("Error while calling 'minnlcsetlc': looks like one of arguments has wrong size");
    k = c.rows();
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minnlcsetlc(const_cast<alglib_impl::minnlcstate*>(state.c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.c_ptr()), k, &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}
#endif

/*************************************************************************
This function sets nonlinear constraints for MinNLC optimizer.

In fact, this function sets the NUMBER of nonlinear constraints. The
constraints themselves (constraint functions) are passed to the
MinNLCOptimize() method. This method requires a user-defined vector
function F[] and its Jacobian J[], where:
* the first component of F[] and the first row of Jacobian J[] correspond
  to the function being minimized
* the next NLEC components of F[] (and rows of J) correspond to nonlinear
  equality constraints G_i(x)=0
* the next NLIC components of F[] (and rows of J) correspond to nonlinear
  inequality constraints H_i(x)<=0

NOTE: you may combine nonlinear constraints with linear/boundary ones. If
      your problem has mixed constraints, you may explicitly specify some
      of them as linear ones. It may help optimizer to handle them more
      efficiently.

INPUT PARAMETERS:
    State   -   structure previously allocated with MinNLCCreate call.
    NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0

NOTE 1: when you solve your problem with augmented Lagrangian solver,
        nonlinear constraints are satisfied only approximately! It is
        possible that algorithm will evaluate function outside of the
        feasible area!

NOTE 2: algorithm scales variables according to the scale specified by the
        MinNLCSetScale() function, so it can handle problems with badly
        scaled variables (as long as we KNOW their scales).

        However, there is no way to automatically scale nonlinear
        constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
        ruin convergence. Solving a problem with constraint "1000*G0(x)=0"
        is NOT the same as solving it with constraint "0.001*G0(x)=0".

        It means that YOU are the one who is responsible for the correct
        scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
        to scale nonlinear constraints in such a way that the I-th
        component of dG/dX (or dH/dx) has approximately unit magnitude
        (for problems with unit scale) or has magnitude approximately
        equal to 1/S[i] (where S is a scale set by the MinNLCSetScale()
        function).
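As an informal illustration (a made-up 2-dimensional problem; the callback name nlcjac is
arbitrary and the optimizer is assumed to be stored in state), declaring one equality and
one inequality constraint and laying out F[]/J[] accordingly might look like:
>
>     minnlcsetnlc(state, 1, 1);
>
>     void nlcjac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
>     {
>         // fi[0]/jac[0]: target f(x) = x0^2+x1^2
>         fi[0] = x[0]*x[0]+x[1]*x[1];
>         jac[0][0] = 2*x[0];
>         jac[0][1] = 2*x[1];
>         // fi[1]/jac[1]: equality constraint G0(x) = x0+x1-2 = 0
>         fi[1] = x[0]+x[1]-2.0;
>         jac[1][0] = 1.0;
>         jac[1][1] = 1.0;
>         // fi[2]/jac[2]: inequality constraint H0(x) = x0-1 <= 0
>         fi[2] = x[0]-1.0;
>         jac[2][0] = 1.0;
>         jac[2][1] = 0.0;
>     }
>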
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetnlc(const_cast(state.c_ptr()), nlec, nlic, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets stopping conditions for inner iterations of optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinNLCSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic selection of the stopping condition. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetcond(const minnlcstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetcond(const_cast(state.c_ptr()), epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for NLC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. 
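A brief sketch (illustrative magnitudes, assuming an optimizer already created in state)
for a problem where the first variable is of order 1 and the second of order 10^6; the
stopping condition set afterwards is then measured in these scaled units:
>
>     real_1d_array s = "[1,1.0e6]";
>     minnlcsetscale(state, s);
>     minnlcsetcond(state, 1.0e-6, 0);
>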
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetscale(const minnlcstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets preconditioner to "inexact LBFGS-based" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). Inexact LBFGS-based preconditioner uses L-BFGS formula combined with orthogonality assumption to perform very fast updates. For a N-dimensional problem with K general linear or nonlinear constraints (boundary ones are not counted) it has O(N*K) cost per iteration. This preconditioner has best quality (less iterations) when general linear and nonlinear constraints are orthogonal to each other (orthogonality with respect to boundary constraints is not required). Number of iterations increases when constraints are non-orthogonal, because algorithm assumes orthogonality, but still it is better than no preconditioner at all. INPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecinexact(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetprecinexact(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets preconditioner to "exact low rank" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! 
MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). It also provides special unpreconditioned mode of operation which can be used for test purposes. Comments below discuss low rank preconditioner. Exact low-rank preconditioner uses Woodbury matrix identity to build quadratic model of the penalized function. It has following features: * no special assumptions about orthogonality of constraints * preconditioner evaluation is optimized for K<=N. * finally, stability of the process is guaranteed only for K<=N due to degeneracy of intermediate matrices. That's why we recommend to use "exact robust" preconditioner for such cases. RECOMMENDATIONS We recommend to choose between "exact low rank" and "exact robust" preconditioners, with "low rank" version being chosen when you know in advance that total count of non-box constraints won't exceed N, and "robust" version being chosen when you need bulletproof solution. INPUT PARAMETERS: State - structure stores algorithm state UpdateFreq- update frequency. Preconditioner is rebuilt after every UpdateFreq iterations. Recommended value: 10 or higher. Zero value means that good default value will be used. -- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetprecexactlowrank(const_cast(state.c_ptr()), updatefreq, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets preconditioner to "exact robust" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). It also provides special unpreconditioned mode of operation which can be used for test purposes. Comments below discuss robust preconditioner. Exact robust preconditioner uses Cholesky decomposition to invert approximate Hessian matrix H=D+W'*C*W (where D stands for diagonal terms of Hessian, combined result of initial scaling matrix and penalty from box constraints; W stands for general linear constraints and linearization of nonlinear ones; C stands for diagonal matrix of penalty coefficients). 
This preconditioner has the following features:
* no special assumptions about constraint structure
* preconditioner is optimized for stability; unlike the "exact low rank"
  version, which fails for K>=N, this one works well for any value of K.
* the only drawback is that it takes O(N^3+K*N^2) time to build it. No
  economical Woodbury update is applied even when it makes sense, thus
  there exist situations (K<<N) when the "exact low rank" preconditioner
  is faster than this one.

RECOMMENDATIONS

We recommend to choose between the "exact low rank" and "exact robust"
preconditioners, with the "low rank" version being chosen when you know in
advance that the total count of non-box constraints won't exceed N, and
the "robust" version being chosen when you need a bulletproof solution.

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    UpdateFreq- update frequency. Preconditioner is rebuilt after every
                UpdateFreq iterations. Recommended value: 10 or higher.
                Zero value means that a good default value will be used.

  -- ALGLIB --
     Copyright 26.09.2014 by Bochkanov Sergey
*************************************************************************/
void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
        return;
#endif
    }
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minnlcsetprecexactrobust(const_cast<alglib_impl::minnlcstate*>(state.c_ptr()), updatefreq, &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}

/*************************************************************************
This function sets preconditioner to "turned off" mode.

Preconditioning is very important for convergence of the Augmented
Lagrangian algorithm because the presence of a penalty term makes the
problem ill-conditioned. The difference between performance of
preconditioned and unpreconditioned methods can be as large as 100x!

MinNLC optimizer may utilize two preconditioners, each with its own
benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one.
It also provides a special unpreconditioned mode of operation which can be
used for test purposes.

This function activates this test mode. Do not use it in production code
to solve real-life problems.

INPUT PARAMETERS:
    State   -   structure stores algorithm state

  -- ALGLIB --
     Copyright 26.09.2014 by Bochkanov Sergey
*************************************************************************/
void minnlcsetprecnone(const minnlcstate &state, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
        return;
#endif
    }
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minnlcsetprecnone(const_cast<alglib_impl::minnlcstate*>(state.c_ptr()), &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}

/*************************************************************************
This function sets maximum step length (after scaling of the step vector
with respect to variable scales specified by the minnlcsetscale() call).

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    StpMax  -   maximum step length, >=0. Set StpMax to 0.0 (default), if
                you don't want to limit step length.

Use this subroutine when you optimize a target function which contains
exp() or other fast growing functions, and the optimization algorithm
makes too large steps which lead to overflow. This function allows us to
reject steps that are too large (and therefore expose us to possible
overflow) without actually calculating the function value at x+stp*d.

NOTE: different solvers employed by the MinNLC optimizer use different
      norms for the step; the AUL solver uses 2-norm, whilst the SLP
      solver uses INF-norm.
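A brief sketch (the bound value is illustrative, assuming an optimizer already created in
state) which caps the scaled step length at 10 to keep exp()-like terms from overflowing:
>
>     minnlcsetstpmax(state, 10.0);
>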
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minnlcsetstpmax(const minnlcstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells MinNLC unit to use Augmented Lagrangian algorithm for nonlinearly constrained optimization. This algorithm is a slight modification of one described in "A Modified Barrier-Augmented Lagrangian Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK, K. SCHEINBERG, I.YUZEFOVICH. AUL solver can be significantly faster than SQP on easy problems due to cheaper iterations, although it needs more function evaluations. Augmented Lagrangian algorithm works by converting problem of minimizing F(x) subject to equality/inequality constraints to unconstrained problem of the form min[ f(x) + + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) + + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ] where: * Rho is a fixed penalization coefficient * PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce equality constraints * SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune" equality constraints, greatly increasing precision * PENALTY_INEQ(x) is a penalty term which is used to approximately enforce inequality constraints * SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune" inequality constraints, greatly increasing precision * Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during outer iterations of algorithm This version of AUL algorithm uses preconditioner, which greatly accelerates convergence. Because this algorithm is similar to penalty methods, it may perform steps into infeasible area. All kinds of constraints (boundary, linear and nonlinear ones) may be violated in intermediate points - and in the solution. However, properly configured AUL method is significantly better at handling constraints than barrier and/or penalty methods. The very basic outline of algorithm is given below: 1) first outer iteration is performed with "default" values of Lagrange multipliers Nu1/Nu2. Solution quality is low (candidate point can be too far away from true solution; large violation of constraints is possible) and is comparable with that of penalty methods. 2) subsequent outer iterations refine Lagrange multipliers and improve quality of the solution. INPUT PARAMETERS: State - structure which stores algorithm state Rho - penalty coefficient, Rho>0: * large enough that algorithm converges with desired precision. Minimum value is 10*max(S'*diag(H)*S), where S is a scale matrix (set by MinNLCSetScale) and H is a Hessian of the function being minimized. If you can not easily estimate Hessian norm, see our recommendations below. * not TOO large to prevent ill-conditioning * for unit-scale problems (variables and Hessian have unit magnitude), Rho=100 or Rho=1000 can be used. 
* it is important to note that Rho is internally multiplied by scaling matrix, i.e. optimum value of Rho depends on scale of variables specified by MinNLCSetScale(). ItsCnt - number of outer iterations: * ItsCnt=0 means that small number of outer iterations is automatically chosen (10 iterations in current version). * ItsCnt=1 means that AUL algorithm performs just as usual barrier method. * ItsCnt>1 means that AUL algorithm performs specified number of outer iterations HOW TO CHOOSE PARAMETERS Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm is sometimes hard to tune. Good values of Rho and ItsCnt are problem- specific. In order to help you we prepared following set of recommendations: * for unit-scale problems (variables and Hessian have unit magnitude), Rho=100 or Rho=1000 can be used. * start from some small value of Rho and solve problem with just one outer iteration (ItcCnt=1). In this case algorithm behaves like penalty method. Increase Rho in 2x or 10x steps until you see that one outer iteration returns point which is "rough approximation to solution". It is very important to have Rho so large that penalty term becomes constraining i.e. modified function becomes highly convex in constrained directions. From the other side, too large Rho may prevent you from converging to the solution. You can diagnose it by studying number of inner iterations performed by algorithm: too few (5-10 on 1000-dimensional problem) or too many (orders of magnitude more than dimensionality) usually means that Rho is too large. * with just one outer iteration you usually have low-quality solution. Some constraints can be violated with very large margin, while other ones (which are NOT violated in the true solution) can push final point too far in the inner area of the feasible set. For example, if you have constraint x0>=0 and true solution x0=1, then merely a presence of "x0>=0" will introduce a bias towards larger values of x0. Say, algorithm may stop at x0=1.5 instead of 1.0. * after you found good Rho, you may increase number of outer iterations. ItsCnt=10 is a good value. Subsequent outer iteration will refine values of Lagrange multipliers. Constraints which were violated will be enforced, inactive constraints will be dropped (corresponding multipliers will be decreased). Ideally, you should see 10-1000x improvement in constraint handling (constraint violation is reduced). * if you see that algorithm converges to vicinity of solution, but additional outer iterations do not refine solution, it may mean that algorithm is unstable - it wanders around true solution, but can not approach it. Sometimes algorithm may be stabilized by increasing Rho one more time, making it 5x or 10x larger. SCALING OF CONSTRAINTS [IMPORTANT] AUL optimizer scales variables according to scale specified by MinNLCSetScale() function, so it can handle problems with badly scaled variables (as long as we KNOW their scales). However, because function being optimized is a mix of original function and constraint-dependent penalty functions, it is important to rescale both variables AND constraints. Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then you have constraint whose scale is different from that of target function (another example is 0.000001*x>=0). It is also possible to have constraints whose scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT same as minimizing it subject to 1000000*x>=0. 
Because we know the coefficients of boundary/linear constraints, we can
automatically rescale and normalize them. However, there is no way to
automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are
black boxes.

It means that YOU are the one who is responsible for the correct scaling
of nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale
nonlinear constraints in such a way that the I-th component of dG/dX (or
dH/dx) has magnitude approximately equal to 1/S[i] (where S is a scale set
by the MinNLCSetScale() function).

WHAT IF IT DOES NOT CONVERGE?

It is possible that the AUL algorithm fails to converge to precise values
of the Lagrange multipliers. It stops somewhere around the true solution,
but the candidate point is still too far from the solution, and some
constraints are violated. This kind of failure is specific to Lagrangian
algorithms - technically, they stop at some point, but this point is not
the constrained solution.

There are several reasons why the algorithm may fail to converge:
a) too loose stopping criteria for inner iteration
b) degenerate, redundant constraints
c) target function has an unconstrained extremum exactly at the boundary
   of some constraint
d) numerical noise in the target function

In all these cases the algorithm is unstable - each outer iteration
results in a large and almost random step which improves handling of some
constraints, but violates other ones (ideally outer iterations should form
a sequence of progressively decreasing steps towards the solution).

The first possible reason is that too loose stopping criteria for the
inner iteration were specified. The Augmented Lagrangian algorithm solves
a sequence of intermediate problems, and requires each of them to be
solved with high precision. Insufficient precision results in an incorrect
update of the Lagrange multipliers.

Another reason is that you may have specified degenerate constraints: say,
some constraint was repeated twice. In most cases the AUL algorithm
gracefully handles such situations, but sometimes it may spend too much
time figuring out subtle degeneracies in the constraint matrix.

The third reason is tricky and hard to diagnose. Consider a situation when
you minimize f=x^2 subject to the constraint x>=0. The unconstrained
extremum is located exactly at the boundary of the constrained area. In
this case the algorithm will tend to oscillate between negative and
positive x. Each time it stops at x<0 it "reinforces" the constraint x>=0,
and each time it is bounced to x>0 it "relaxes" the constraint (and is
attracted to x<0).

Such a situation sometimes happens in problems with hidden symmetries. The
algorithm gets caught in a loop with Lagrange multipliers being
continuously increased/decreased. Luckily, such a loop forms after at
least three iterations, so this problem can be solved by DECREASING the
number of outer iterations down to 1-2 and increasing the penalty
coefficient Rho as much as possible.

The final reason is numerical noise. The AUL algorithm is robust against
moderate noise (more robust than, say, active set methods), but large
noise may destabilize the algorithm.
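A brief sketch (parameter values taken from the unit-scale recommendations above, assuming
an optimizer already created in state) of activating the AUL solver:
>
>     double rho = 1000.0;      // penalty coefficient for a unit-scale problem
>     ae_int_t itscnt = 10;     // number of outer iterations
>     minnlcsetalgoaul(state, rho, itscnt);
>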
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetalgoaul(const_cast(state.c_ptr()), rho, itscnt, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells MinNLC optimizer to use SLP (Successive Linear Programming) algorithm for nonlinearly constrained optimization. This algorithm is a slight modification of one described in "A Linear programming-based optimization algorithm for solving nonlinear programming problems" (2010) by Claus Still and Tapio Westerlund. This solver is the slowest one in ALGLIB, it requires more target function evaluations that SQP and AUL. However it is somewhat more robust in tricky cases, so it can be used as a backup plan. We recommend to use this algo when SQP/AUL do not work (does not return the solution you expect). If trying different approach gives same results, then MAYBE something is wrong with your optimization problem. Despite its name ("linear" = "first order method") this algorithm performs steps similar to that of conjugate gradients method; internally it uses orthogonality/conjugacy requirement for subsequent steps which makes it closer to second order methods in terms of convergence speed. Convergence is proved for the following case: * function and constraints are continuously differentiable (C1 class) * extended Mangasarian–Fromovitz constraint qualification (EMFCQ) holds; in the context of this algorithm EMFCQ means that one can, for any infeasible point, find a search direction such that the constraint infeasibilities are reduced. This algorithm has following nice properties: * no parameters to tune * no convexity requirements for target function or constraints * initial point can be infeasible * algorithm respects box constraints in all intermediate points (it does not even evaluate function outside of box constrained area) * once linear constraints are enforced, algorithm will not violate them * no such guarantees can be provided for nonlinear constraints, but once nonlinear constraints are enforced, algorithm will try to respect them as much as possible * numerical differentiation does not violate box constraints (although general linear and nonlinear ones can be violated during differentiation) * from our experience, this algorithm is somewhat more robust in really difficult cases INPUT PARAMETERS: State - structure which stores algorithm state ===== TRACING SLP SOLVER ================================================= SLP solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'SLP' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. 
It also prints OptGuard integrity checker report when nonsmoothness of target/constraints is suspected. * 'SLP.DETAILED'- for output of points being visited and search directions This symbol also implicitly defines 'SLP'. You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format * 'SLP.PROBING' - to let algorithm insert additional function evaluations before line search in order to build human-readable chart of the raw Lagrangian (~40 additional function evaluations is performed for each line search). This symbol also implicitly defines 'SLP'. * 'OPTGUARD' - for report of smoothness/continuity violations in target and/or constraints. This kind of reporting is included in 'SLP', but it comes with lots of additional info. If you need just smoothness monitoring, specify this setting. NOTE: this tag merely directs OptGuard output to log file. Even if you specify it, you still have to configure OptGuard by calling minnlcoptguard...() family of functions. By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. Specifying 'SLP.PROBING' adds even larger overhead due to additional function evaluations being performed. You may specify multiple symbols by separating them with commas: > > alglib::trace_file("SLP,SLP.PROBING,PREC.F6", "path/to/trace.log") > -- ALGLIB -- Copyright 02.04.2018 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgoslp(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetalgoslp(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells MinNLC optimizer to use SQP (Successive Quadratic Programming) algorithm for nonlinearly constrained optimization. This algorithm needs order of magnitude (5x-10x) less function evaluations than AUL solver, but has higher overhead because each iteration involves solution of quadratic programming problem. 
Convergence is proved for the following case: * function and constraints are continuously differentiable (C1 class) This algorithm has following nice properties: * no parameters to tune * no convexity requirements for target function or constraints * initial point can be infeasible * algorithm respects box constraints in all intermediate points (it does not even evaluate function outside of box constrained area) * once linear constraints are enforced, algorithm will not violate them * no such guarantees can be provided for nonlinear constraints, but once nonlinear constraints are enforced, algorithm will try to respect them as much as possible * numerical differentiation does not violate box constraints (although general linear and nonlinear ones can be violated during differentiation) We recommend this algorithm as a default option for medium-scale problems (less than thousand of variables) or problems with target function being hard to evaluate. For large-scale problems or ones with very cheap target function AUL solver can be better option. INPUT PARAMETERS: State - structure which stores algorithm state ===== INTERACTION WITH OPTGUARD ========================================== OptGuard integrity checker allows us to catch problems like errors in gradients and discontinuity/nonsmoothness of the target/constraints. Latter kind of problems can be detected by looking upon line searches performed during optimization and searching for signs of nonsmoothness. The problem with SQP is that it is too good for OptGuard to work - it does not perform line searches. It typically needs 1-2 function evaluations per step, and it is not enough for OptGuard to detect nonsmoothness. So, if you suspect that your problem is nonsmooth, we recommend you to use AUL or SLP solvers. ===== TRACING SQP SOLVER ================================================= SQP solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'SQP' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. It also prints OptGuard integrity checker report when nonsmoothness of target/constraints is suspected. * 'SQP.DETAILED'- for output of points being visited and search directions This symbol also implicitly defines 'SQP'. You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format * 'SQP.PROBING' - to let algorithm insert additional function evaluations before line search in order to build human-readable chart of the raw Lagrangian (~40 additional function evaluations is performed for each line search). This symbol also implicitly defines 'SQP'. By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. Specifying 'SQP.PROBING' adds even larger overhead due to additional function evaluations being performed. 
You may specify multiple symbols by separating them with commas: > > alglib::trace_file("SQP,SQP.PROBING,PREC.F6", "path/to/trace.log") > -- ALGLIB -- Copyright 02.12.2019 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgosqp(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetalgosqp(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinNLCOptimize(). NOTE: algorithm passes two parameters to rep() callback - current point and penalized function value at current point. Important - function value which is returned is NOT function being minimized. It is sum of the value of the function being minimized - and penalty term. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcsetxrep(const minnlcstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcsetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. 
See below for functions which provide better documented API *************************************************************************/ bool minnlciteration(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minnlciteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minnlcoptimize(minnlcstate &state, void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(fvec!=NULL, "ALGLIB: error in 'minnlcoptimize()' (fvec is NULL)", &_alglib_env_state); while( alglib_impl::minnlciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfi ) { fvec(state.x, state.fi, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minnlcoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minnlcoptimize(minnlcstate &state, void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(jac!=NULL, "ALGLIB: error in 'minnlcoptimize()' (jac is NULL)", &_alglib_env_state); while( alglib_impl::minnlciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfij ) { jac(state.x, state.fi, state.j, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minnlcoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic 
gradient/Jacobian. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function (constraints) at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient/Jacobian with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients/Jacobians, and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minnlcoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minnlcsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardgradient(const minnlcstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) and/or constraints b) nonsmooth target function (non-C1) and/or constraints Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. 
It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. This kind of monitoring does not work well with SQP because SQP solver needs just 1-2 function evaluations per step, which is not enough for OptGuard to make any conclusions. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardsmoothness(const minnlcstate &state, const ae_int_t level, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardsmoothness(const_cast<alglib_impl::minnlcstate*>(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker.
Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) and/or constraints b) nonsmooth target function (non-C1) and/or constraints Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. This kind of monitoring does not work well with SQP because SQP solver needs just 1-2 function evaluations per step, which is not enough for OptGuard to make any conclusions. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result.
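As an illustrative sketch only (all functions used below are documented in this unit; 'state' is an already created minnlcstate and 0.001 is just an example test step), a typical OptGuard-enabled session could be configured and inspected as follows:
>
> alglib::minnlcoptguardgradient(state, 0.001); // verify user-supplied Jacobian
> alglib::minnlcoptguardsmoothness(state);      // level-1 C0/C1 monitoring
> // ... run the optimization with minnlcoptimize() ...
> alglib::optguardreport ogrep;
> alglib::minnlcoptguardresults(state, ogrep);
> if( ogrep.badgradsuspected || ogrep.nonc0suspected || ogrep.nonc1suspected )
>     printf("OptGuard flagged the problem\n");
>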
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnlcoptguardsmoothness(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t level; level = 1; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardsmoothness(const_cast(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over. === PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minnlcoptguardgradient() for gradient verification * minnlcoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradfidx for specific function (Jacobian row) suspected * rep.badgradvidx for specific variable (Jacobian column) suspected * rep.badgradxbase, a point where gradient/Jacobian is tested * rep.badgraduser, user-provided gradient/Jacobian * rep.badgradnum, reference gradient/Jacobian obtained via numerical differentiation * rep.nonc0suspected, and additionally: * rep.nonc0fidx - an index of specific function violating C0 continuity * rep.nonc1suspected, and additionally * rep.nonc1fidx - an index of specific function violating C1 continuity Here function index 0 means target function, index 1 or higher denotes nonlinear constraints. === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minnlcoptguardnonc1test0results() * minnlcoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. 
Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardresults(const minnlcstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardnonc1test0results(const minnlcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardnonc1test0results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardnonc1test1results(const minnlcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcoptguardnonc1test1results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* MinNLC results: the solution found, completion codes and additional information. If you activated OptGuard integrity checking functionality and want to get OptGuard report, it can be retrieved with: * minnlcoptguardresults() - for a primary report about (a) suspected C0/C1 continuity violations and (b) errors in the analytic gradient. * minnlcoptguardnonc1test0results() - for C1 continuity violation test #0, detailed line search log * minnlcoptguardnonc1test1results() - for C1 continuity violation test #1, detailed line search log INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report, contains information about completion code, constraint violation at the solution and so on. You should check rep.terminationtype in order to distinguish successful termination from unsuccessful one: === FAILURE CODES === * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 box constraints are infeasible. Note: infeasibility of non-box constraints does NOT trigger emergency completion; you have to examine rep.bcerr/rep.lcerr/rep.nlcerr to detect possibly inconsistent constraints. === SUCCESS CODES === * 2 scaled step is no more than EpsX. * 5 MaxIts steps were taken. * 8 user requested algorithm termination via minnlcrequesttermination(), last accepted point is returned. More information about fields of this structure can be found in the comments on minnlcreport datatype. 
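To illustrate the overall calling sequence (a sketch only: the callback name nlcjac and the toy problem are made up; minnlccreate(), minnlcsetalgosqp() and minnlcsetnlc() are documented elsewhere in this unit, and minnlcoptimize() is assumed to be callable with its optional rep/ptr arguments omitted):
>
> void nlcjac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
>             alglib::real_2d_array &jac, void *ptr)
> {
>     // fi[0] is the target, fi[1] is one nonlinear inequality constraint g(x)<=0
>     fi[0] = x[0]*x[0] + x[1]*x[1];
>     fi[1] = 1.0 - x[0] - x[1];
>     jac[0][0] = 2*x[0]; jac[0][1] = 2*x[1];
>     jac[1][0] = -1.0;   jac[1][1] = -1.0;
> }
>
> // ... inside your solver setup code ...
> alglib::real_1d_array x0 = "[2,2]";
> alglib::real_1d_array xsol;
> alglib::minnlcstate state;
> alglib::minnlcreport rep;
> alglib::minnlccreate(2, x0, state);
> alglib::minnlcsetalgosqp(state);
> alglib::minnlcsetnlc(state, 0, 1);      // no equality, one inequality constraint
> alglib::minnlcoptimize(state, nlcjac);
> alglib::minnlcresults(state, xsol, rep);
> if( rep.terminationtype<0 )
>     printf("optimizer failed, code %d\n", (int)rep.terminationtype);
>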
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcresults(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* NLC results Buffered implementation of MinNLCResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcresultsbuf(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
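For example (a sketch only; stop_requested is a placeholder for an application-specific flag, and &state is assumed to have been passed as the ptr argument of minnlcoptimize()), a graceful stop could be requested from inside the progress callback:
>
> void progress_rep(const alglib::real_1d_array &x, double func, void *ptr)
> {
>     alglib::minnlcstate *state = (alglib::minnlcstate*)ptr;
>     if( stop_requested )                          // placeholder flag
>         alglib::minnlcrequesttermination(*state); // solver later returns code 8
> }
>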
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minnlcrequesttermination(const minnlcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. X - new starting point. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnlcrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores nonlinear optimizer state. 
You should use functions provided by MinBC subpackage to work with this object *************************************************************************/ _minbcstate_owner::_minbcstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbcstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minbcstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbcstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbcstate)); alglib_impl::_minbcstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minbcstate_owner::_minbcstate_owner(const _minbcstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbcstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbcstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minbcstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbcstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbcstate)); alglib_impl::_minbcstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minbcstate_owner& _minbcstate_owner::operator=(const _minbcstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minbcstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbcstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minbcstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minbcstate)); alglib_impl::_minbcstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minbcstate_owner::~_minbcstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minbcstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minbcstate* _minbcstate_owner::c_ptr() { return p_struct; } alglib_impl::minbcstate* _minbcstate_owner::c_ptr() const { return const_cast(p_struct); } minbcstate::minbcstate() : _minbcstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minbcstate::minbcstate(const minbcstate &rhs):_minbcstate_owner(rhs) ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minbcstate& minbcstate::operator=(const minbcstate &rhs) { if( this==&rhs ) return *this; _minbcstate_owner::operator=(rhs); return *this; } 
minbcstate::~minbcstate() { } /************************************************************************* This structure stores optimization report: * iterationscount number of iterations * nfev number of gradient evaluations * terminationtype termination type (see below) TERMINATION CODES terminationtype field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. -3 inconsistent constraints. 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 terminated by user who called minbcrequesttermination(). X contains point which was "current accepted" when termination request was submitted. *************************************************************************/ _minbcreport_owner::_minbcreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbcreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minbcreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbcreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbcreport)); alglib_impl::_minbcreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minbcreport_owner::_minbcreport_owner(const _minbcreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minbcreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbcreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minbcreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minbcreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minbcreport)); alglib_impl::_minbcreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minbcreport_owner& _minbcreport_owner::operator=(const _minbcreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minbcreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minbcreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minbcreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minbcreport)); alglib_impl::_minbcreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); 
ae_state_clear(&_state); return *this; } _minbcreport_owner::~_minbcreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minbcreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minbcreport* _minbcreport_owner::c_ptr() { return p_struct; } alglib_impl::minbcreport* _minbcreport_owner::c_ptr() const { return const_cast<alglib_impl::minbcreport*>(p_struct); } minbcreport::minbcreport() : _minbcreport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),varidx(p_struct->varidx),terminationtype(p_struct->terminationtype) { } minbcreport::minbcreport(const minbcreport &rhs):_minbcreport_owner(rhs) ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),varidx(p_struct->varidx),terminationtype(p_struct->terminationtype) { } minbcreport& minbcreport::operator=(const minbcreport &rhs) { if( this==&rhs ) return *this; _minbcreport_owner::operator=(rhs); return *this; } minbcreport::~minbcreport() { } /************************************************************************* BOX CONSTRAINED OPTIMIZATION WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to box constraints (with some of box constraints actually being equality ones). This optimizer uses algorithm similar to that of MinBLEIC (optimizer with general linear constraints), but presence of box-only constraints allows us to use faster constraint activation strategies. On large-scale problems, with multiple constraints active at the solution, this optimizer can be several times faster than BLEIC. REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization is far more complex than the unconstrained one. Here we give a very brief outline of the BC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read the ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBCCreate() call 2. User adds box constraints by calling MinBCSetBC() function. 3. User sets stopping conditions with MinBCSetCond(). 4. User calls MinBCOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBCResults() to get solution 6. Optionally user may call MinBCRestartFrom() to solve another problem with same N but another starting point. MinBCRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation.
OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbccreate(n, const_cast<alglib_impl::ae_vector*>(x.c_ptr()), const_cast<alglib_impl::minbcstate*>(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BOX CONSTRAINED OPTIMIZATION WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to box constraints (with some of box constraints actually being equality ones). This optimizer uses algorithm similar to that of MinBLEIC (optimizer with general linear constraints), but presence of box-only constraints allows us to use faster constraint activation strategies. On large-scale problems, with multiple constraints active at the solution, this optimizer can be several times faster than BLEIC. REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization is far more complex than the unconstrained one. Here we give a very brief outline of the BC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read the ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBCCreate() call 2. User adds box constraints by calling MinBCSetBC() function. 3. User sets stopping conditions with MinBCSetCond(). 4. User calls MinBCOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBCResults() to get solution 6. Optionally user may call MinBCRestartFrom() to solve another problem with same N but another starting point. MinBCRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation.
OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbccreate(const real_1d_array &x, minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbccreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* The subroutine is finite difference variant of MinBCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBCCreate() in order to get more information about creation of BC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. CG needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
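For illustration (a sketch under the assumption that the function-only minbcoptimize() overload and minbcresults() behave as documented elsewhere in this unit; bcfunc and all numbers are placeholders):
>
> void bcfunc(const alglib::real_1d_array &x, double &func, void *ptr)
> {
>     func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);   // value only, no gradient
> }
>
> alglib::real_1d_array x0   = "[0,0]";
> alglib::real_1d_array bndl = "[-1,-1]";
> alglib::real_1d_array bndu = "[+1,+1]";
> alglib::real_1d_array xsol;
> alglib::minbcstate state;
> alglib::minbcreport rep;
> alglib::minbccreatef(x0, 1.0e-6, state);        // DiffStep=1.0E-6
> alglib::minbcsetbc(state, bndl, bndu);
> alglib::minbcsetcond(state, 0, 0, 1.0e-6, 0);
> alglib::minbcoptimize(state, bcfunc);
> alglib::minbcresults(state, xsol, rep);
>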
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbccreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* The subroutine is finite difference variant of MinBCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBCCreate() in order to get more information about creation of BC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. CG needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
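For instance (example values only; x0 and state are assumed to be declared as in the sketch above), if the first variable is of order 1 and the second of order 1000, setting the scale makes the effective differentiation step equal to DiffStep*S[I] for each variable:
>
> alglib::real_1d_array s = "[1,1000]";
> alglib::minbccreatef(x0, 1.0e-6, state); // steps become 1.0E-6*1 and 1.0E-6*1000
> alglib::minbcsetscale(state, s);
>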
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minbccreatef(const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbccreatef(n, const_cast<alglib_impl::ae_vector*>(x.c_ptr()), diffstep, const_cast<alglib_impl::minbcstate*>(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets boundary constraints for BC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBCRestartFrom(). INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has the following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints, even when numerical differentiation is used (algorithm adjusts nodes according to boundary constraints) -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetbc(const_cast<alglib_impl::minbcstate*>(state.c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets stopping conditions for the optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidean norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinBCSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinBCSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection.
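For example (a sketch; 'state' is an already created minbcstate, the bounds and tolerance are arbitrary), freezing the second variable and requesting a scaled-step tolerance could look like:
>
> alglib::real_1d_array bndl = "[-1.0,2.0]";
> alglib::real_1d_array bndu = "[+1.0,2.0]";       // BndL[1]=BndU[1], so x[1] is frozen at 2.0
> alglib::minbcsetbc(state, bndl, bndu);
> alglib::minbcsetcond(state, 0, 0, 1.0e-6, 0);    // stop when scaled step is below 1.0E-6
>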
NOTE: when SetCond() called with non-zero MaxIts, BC solver may perform slightly more than MaxIts iterations. I.e., MaxIts sets non-strict limit on iterations count. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetcond(const_cast(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for BC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the BC too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinBCSetPrec...() functions. There is a special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minbcsetscale(const minbcstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. 
INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecdefault(const minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetprecdefault(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE 1: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetprecdiag(const_cast(state.c_ptr()), const_cast(d.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinBCSetScale() call (before or after MinBCSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. 
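As a sketch (not part of the library; the scale values are arbitrary and
only illustrate badly scaled variables), scale-based preconditioning is
typically combined with MinBCSetScale() as follows:

    // assumes x[0] varies on the order of 1 and x[1] on the order of 10000
    real_1d_array s = "[1,10000]";
    minbcsetscale(state, s);     // scales drive stopping tests and H[i]=1/(s[i]^2)
    minbcsetprecscale(state);    // turn the scale-based diagonal preconditioner on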
INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecscale(const minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetprecscale(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinBCOptimize(). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetxrep(const minbcstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. 
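For example (an illustrative sketch; the particular limit is an arbitrary
choice for a target containing exp()-like terms):

    minbcsetstpmax(state, 10.0);    // never try steps longer than 10.0
    // ... later, minbcsetstpmax(state, 0.0) would remove the limit again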
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetstpmax(const minbcstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcsetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ bool minbciteration(const minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minbciteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minbcoptimize(minbcstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minbcoptimize()' (func is NULL)", &_alglib_env_state); while( alglib_impl::minbciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minbcoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minbcoptimize(minbcstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else 
_ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minbcoptimize()' (grad is NULL)", &_alglib_env_state); while( alglib_impl::minbciteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minbcoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minbcoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minbcsetscale(). 
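The sketch below is illustrative only: the quadratic target, its gradient
and the TestStep value are assumptions made for this example, not part of
the library.

    // assumes: #include "optimization.h" and using namespace alglib;
    void target_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        // hypothetical smooth target f = (x0-1)^2 + (x1+2)^2
        func    = (x[0]-1)*(x[0]-1) + (x[1]+2)*(x[1]+2);
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*(x[1]+2);
    }

    // somewhere in the calling code:
    real_1d_array x0 = "[0,0]";
    minbcstate    state;
    minbccreate(x0, state);
    minbcoptguardgradient(state, 0.001);            // verify gradient with TestStep=0.001
    minbcoptimize(state, target_grad, NULL, NULL);  // OptGuard checks run silently inside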
=== EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minbcoptguardgradient(const minbcstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may went unnoticed (however, in most cases they can be spotted with restart from different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of the coding errors in the target function/constraints (or their gradients). 
Such errors most often manifest themselves as discontinuity or
nonsmoothness of the target/constraints.

Another frequent situation is when you try to optimize something involving
lots of min() and max() operations, i.e. nonsmooth target. Although not a
coding error, it is nonsmoothness anyway - and smooth optimizers usually
stop right after encountering nonsmoothness, well before reaching solution.

OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to catch
errors. Upon discovering suspicious pair of points it raises appropriate
flag (and allows you to continue optimization). When optimization is done,
you can study OptGuard result.

  -- ALGLIB --
     Copyright 21.11.2018 by Bochkanov Sergey
*************************************************************************/
void minbcoptguardsmoothness(const minbcstate &state, const ae_int_t level, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
    {
#if !defined(AE_NO_EXCEPTIONS)
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
#else
        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
        return;
#endif
    }
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minbcoptguardsmoothness(const_cast<alglib_impl::minbcstate*>(state.c_ptr()), level, &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}

/*************************************************************************
This function activates/deactivates nonsmoothness monitoring option of the
OptGuard integrity checker. Smoothness monitor silently observes solution
process and tries to detect ill-posed problems, i.e. ones with:
a) discontinuous target function (non-C0)
b) nonsmooth target function (non-C1)

Smoothness monitoring does NOT interrupt optimization even if it suspects
that your problem is nonsmooth. It just sets corresponding flags in the
OptGuard report which can be retrieved after optimization is over.

Smoothness monitoring is a moderate overhead option which often adds less
than 1% to the optimizer running time. Thus, you can use it even for large
scale problems.

NOTE: OptGuard does NOT guarantee that it will always detect C0/C1
      continuity violations.

      First, minor errors are hard to catch - say, a 0.0001 difference in
      the model values at two sides of the gap may be due to discontinuity
      of the model - or simply because the model has changed.

      Second, C1-violations are especially difficult to detect in a
      noninvasive way. The optimizer usually performs very short steps
      near the nonsmoothness, and differentiation usually introduces a lot
      of numerical noise. It is hard to tell whether some tiny
      discontinuity in the slope is due to real nonsmoothness or just due
      to numerical noise alone.

      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted with restart from different initial point).

INPUT PARAMETERS:
    state   -   algorithm state
    level   -   monitoring level:
                * 0 - monitoring is disabled
                * 1 - noninvasive low-overhead monitoring; function values
                      and/or gradients are recorded, but OptGuard does not
                      try to perform additional evaluations in order to
                      get more information about suspicious locations.
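As a short sketch (not part of the library), monitoring is usually enabled
just before the optimization session:

    minbcoptguardsmoothness(state);    // same as passing level=1
    // ... run minbcoptimize() as usual; the collected flags are inspected
    //     afterwards with minbcoptguardresults()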
=== EXPLANATION ==========================================================

One major source of headache during optimization is the possibility of
the coding errors in the target function/constraints (or their gradients).

Such errors most often manifest themselves as discontinuity or
nonsmoothness of the target/constraints.

Another frequent situation is when you try to optimize something involving
lots of min() and max() operations, i.e. nonsmooth target. Although not a
coding error, it is nonsmoothness anyway - and smooth optimizers usually
stop right after encountering nonsmoothness, well before reaching solution.

OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to catch
errors. Upon discovering suspicious pair of points it raises appropriate
flag (and allows you to continue optimization). When optimization is done,
you can study OptGuard result.

  -- ALGLIB --
     Copyright 21.11.2018 by Bochkanov Sergey
*************************************************************************/
#if !defined(AE_NO_EXCEPTIONS)
void minbcoptguardsmoothness(const minbcstate &state, const xparams _xparams)
{
    jmp_buf _break_jump;
    alglib_impl::ae_state _alglib_env_state;
    ae_int_t level;
    level = 1;
    alglib_impl::ae_state_init(&_alglib_env_state);
    if( setjmp(_break_jump) )
        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
    if( _xparams.flags!=0x0 )
        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
    alglib_impl::minbcoptguardsmoothness(const_cast<alglib_impl::minbcstate*>(state.c_ptr()), level, &_alglib_env_state);
    alglib_impl::ae_state_clear(&_alglib_env_state);
    return;
}
#endif

/*************************************************************************
Results of OptGuard integrity check, should be called after optimization
session is over.
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minbcoptguardgradient() for gradient verification * minbcoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minbcoptguardnonc1test0results() * minbcoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. 
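As an illustration (a sketch which assumes that an optimization session
has already been run with the OptGuard checks enabled; printf() is used
just to keep the example short):

    optguardreport ogrep;
    minbcoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
        printf("analytic gradient looks wrong, variable %d is suspicious\n", (int)ogrep.badgradvidx);
    if( ogrep.nonc0suspected )
        printf("target function seems discontinuous (non-C0)\n");
    if( ogrep.nonc1suspected )
        printf("target function seems nonsmooth (non-C1)\n");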
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardresults(const minbcstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardnonc1test0results(const minbcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcoptguardnonc1test0results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardnonc1test1results(const minbcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcoptguardnonc1test1results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BC results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report. You should check Rep.TerminationType in order to distinguish successful termination from unsuccessful one: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 inconsistent constraints. * 1 relative function improvement is no more than EpsF. * 2 scaled step is no more than EpsX. * 4 scaled gradient norm is no more than EpsG. * 5 MaxIts steps was taken * 8 terminated by user who called minbcrequesttermination(). X contains point which was "current accepted" when termination request was submitted. More information about fields of this structure can be found in the comments on MinBCReport datatype. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* BC results Buffered implementation of MinBCResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. 
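For illustration (a sketch under the assumption that a batch of related
problems is solved in a loop; problem_count, starting_points[] and
target_grad are hypothetical names introduced for this example):

    real_1d_array xsol;      // reused between calls, resized only if too small
    minbcreport   rep;
    for(int k=0; k<problem_count; k++)
    {
        minbcrestartfrom(state, starting_points[k]);    // same N, new starting point
        minbcoptimize(state, target_grad, NULL, NULL);
        minbcresultsbuf(state, xsol, rep);              // no reallocation on later passes
        if( rep.terminationtype<=0 )
            printf("problem %d failed, completion code %d\n", k, (int)rep.terminationtype);
    }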
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure previously allocated with MinBCCreate call. X - new starting point. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcrestartfrom(const minbcstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
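As a sketch (illustrative only; out_of_time() is a hypothetical check and
the state pointer is passed through the opaque ptr argument by the caller):

    // progress callback passed to minbcoptimize(); reports require
    // a prior call to minbcsetxrep(state, true)
    void progress_rep(const real_1d_array &x, double func, void *ptr)
    {
        minbcstate *s = (minbcstate*)ptr;
        if( out_of_time() )                 // hypothetical budget check
            minbcrequesttermination(*s);    // solver will stop with code 8
    }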
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minbcrequesttermination(const minbcstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbcrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores nonlinear optimizer state. You should use functions provided by MinNS subpackage to work with this object *************************************************************************/ _minnsstate_owner::_minnsstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnsstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minnsstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnsstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnsstate)); alglib_impl::_minnsstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minnsstate_owner::_minnsstate_owner(const _minnsstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnsstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnsstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minnsstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnsstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnsstate)); alglib_impl::_minnsstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minnsstate_owner& _minnsstate_owner::operator=(const _minnsstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minnsstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnsstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minnsstate_destroy(p_struct); memset(p_struct, 0, 
sizeof(alglib_impl::minnsstate)); alglib_impl::_minnsstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minnsstate_owner::~_minnsstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minnsstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minnsstate* _minnsstate_owner::c_ptr() { return p_struct; } alglib_impl::minnsstate* _minnsstate_owner::c_ptr() const { return const_cast(p_struct); } minnsstate::minnsstate() : _minnsstate_owner() ,needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),j(&p_struct->j),x(&p_struct->x) { } minnsstate::minnsstate(const minnsstate &rhs):_minnsstate_owner(rhs) ,needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),j(&p_struct->j),x(&p_struct->x) { } minnsstate& minnsstate::operator=(const minnsstate &rhs) { if( this==&rhs ) return *this; _minnsstate_owner::operator=(rhs); return *this; } minnsstate::~minnsstate() { } /************************************************************************* This structure stores optimization report: * IterationsCount total number of inner iterations * NFEV number of gradient evaluations * TerminationType termination type (see below) * CErr maximum violation of all types of constraints * LCErr maximum violation of linear constraints * NLCErr maximum violation of nonlinear constraints TERMINATION CODES TerminationType field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. -3 box constraints are inconsistent -1 inconsistent parameters were passed: * penalty parameter for minnssetalgoags() is zero, but we have nonlinear constraints set by minnssetnlc() 2 sampling radius decreased below epsx 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 User requested termination via MinNSRequestTermination() Other fields of this structure are not documented and should not be used! 
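For illustration (a sketch only; it assumes that minnsoptimize() has
already been run on some state object named nsstate):

    real_1d_array xsol;
    minnsreport   nsrep;
    minnsresults(nsstate, xsol, nsrep);
    if( nsrep.terminationtype>0 )
        printf("success: %d iterations, max constraint violation %.2e\n",
               (int)nsrep.iterationscount, nsrep.cerr);
    else
        printf("failure, completion code %d\n", (int)nsrep.terminationtype);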
*************************************************************************/ _minnsreport_owner::_minnsreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnsreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minnsreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnsreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnsreport)); alglib_impl::_minnsreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minnsreport_owner::_minnsreport_owner(const _minnsreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minnsreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnsreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minnsreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minnsreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minnsreport)); alglib_impl::_minnsreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minnsreport_owner& _minnsreport_owner::operator=(const _minnsreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minnsreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minnsreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minnsreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minnsreport)); alglib_impl::_minnsreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minnsreport_owner::~_minnsreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minnsreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minnsreport* _minnsreport_owner::c_ptr() { return p_struct; } alglib_impl::minnsreport* _minnsreport_owner::c_ptr() const { return const_cast(p_struct); } minnsreport::minnsreport() : _minnsreport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),cerr(p_struct->cerr),lcerr(p_struct->lcerr),nlcerr(p_struct->nlcerr),terminationtype(p_struct->terminationtype),varidx(p_struct->varidx),funcidx(p_struct->funcidx) { } minnsreport::minnsreport(const minnsreport &rhs):_minnsreport_owner(rhs) 
,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),cerr(p_struct->cerr),lcerr(p_struct->lcerr),nlcerr(p_struct->nlcerr),terminationtype(p_struct->terminationtype),varidx(p_struct->varidx),funcidx(p_struct->funcidx) { } minnsreport& minnsreport::operator=(const minnsreport &rhs) { if( this==&rhs ) return *this; _minnsreport_owner::operator=(rhs); return *this; } minnsreport::~minnsreport() { } /************************************************************************* NONSMOOTH NONCONVEX OPTIMIZATION SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints * nonlinear equality constraints Gi(x)=0 * nonlinear inequality constraints Hi(x)<=0 IMPORTANT: see MinNSSetAlgoAGS for important information on performance restrictions of AGS solver. REQUIREMENTS: * starting point X0 must be feasible or not too far away from the feasible set * F(), G(), H() are continuous, locally Lipschitz and continuously (but not necessarily twice) differentiable in an open dense subset of R^N. Functions F(), G() and H() may be nonsmooth and non-convex. Informally speaking, it means that functions are composed of large differentiable "patches" with nonsmoothness having place only at the boundaries between these "patches". Most real-life nonsmooth functions satisfy these requirements. Say, anything which involves finite number of abs(), min() and max() is very likely to pass the test. Say, it is possible to optimize anything of the following: * f=abs(x0)+2*abs(x1) * f=max(x0,x1) * f=sin(max(x0,x1)+abs(x2)) * for nonlinearly constrained problems: F() must be bounded from below without nonlinear constraints (this requirement is due to the fact that, contrary to box and linear constraints, nonlinear ones require special handling). * user must provide function value and gradient for F(), H(), G() at all points where function/gradient can be calculated. If optimizer requires value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)), where gradient is not defined, user may resolve tie arbitrarily (in our case - return +1 or -1 at its discretion). * NS solver supports numerical differentiation, i.e. it may differentiate your function for you, but it results in 2N increase of function evaluations. Not recommended unless you solve really small problems. See minnscreatef() for more information on this functionality. USAGE: 1. User initializes algorithm state with MinNSCreate() call and chooses what NLC solver to use. There is some solver which is used by default, with default settings, but you should NOT rely on default choice. It may change in future releases of ALGLIB without notice, and no one can guarantee that new solver will be able to solve your problem with default settings. From the other side, if you choose solver explicitly, you can be pretty sure that it will work with new ALGLIB releases. In the current release following solvers can be used: * AGS solver (activated with MinNSSetAlgoAGS() function) 2. User adds boundary and/or linear and/or nonlinear constraints by means of calling one of the following functions: a) MinNSSetBC() for boundary constraints b) MinNSSetLC() for linear constraints c) MinNSSetNLC() for nonlinear constraints You may combine (a), (b) and (c) in one optimization problem. 3. User sets scale of the variables with MinNSSetScale() function. 
It is VERY important to set scale of the variables, because nonlinearly constrained problems are hard to solve when variables are badly scaled. 4. User sets stopping conditions with MinNSSetCond(). 5. Finally, user calls MinNSOptimize() function which takes algorithm state and pointer (delegate, etc) to callback function which calculates F/G/H. 7. User calls MinNSResults() to get solution 8. Optionally user may call MinNSRestartFrom() to solve another problem with same N but another starting point. MinNSRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state NOTE: minnscreatef() function may be used if you do not have analytic gradient. This function creates solver which uses numerical differentiation with user-specified step. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnscreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* NONSMOOTH NONCONVEX OPTIMIZATION SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints * nonlinear equality constraints Gi(x)=0 * nonlinear inequality constraints Hi(x)<=0 IMPORTANT: see MinNSSetAlgoAGS for important information on performance restrictions of AGS solver. REQUIREMENTS: * starting point X0 must be feasible or not too far away from the feasible set * F(), G(), H() are continuous, locally Lipschitz and continuously (but not necessarily twice) differentiable in an open dense subset of R^N. Functions F(), G() and H() may be nonsmooth and non-convex. Informally speaking, it means that functions are composed of large differentiable "patches" with nonsmoothness having place only at the boundaries between these "patches". Most real-life nonsmooth functions satisfy these requirements. Say, anything which involves finite number of abs(), min() and max() is very likely to pass the test. Say, it is possible to optimize anything of the following: * f=abs(x0)+2*abs(x1) * f=max(x0,x1) * f=sin(max(x0,x1)+abs(x2)) * for nonlinearly constrained problems: F() must be bounded from below without nonlinear constraints (this requirement is due to the fact that, contrary to box and linear constraints, nonlinear ones require special handling). 
* user must provide function value and gradient for F(), H(), G() at all points where function/gradient can be calculated. If optimizer requires value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)), where gradient is not defined, user may resolve tie arbitrarily (in our case - return +1 or -1 at its discretion). * NS solver supports numerical differentiation, i.e. it may differentiate your function for you, but it results in 2N increase of function evaluations. Not recommended unless you solve really small problems. See minnscreatef() for more information on this functionality. USAGE: 1. User initializes algorithm state with MinNSCreate() call and chooses what NLC solver to use. There is some solver which is used by default, with default settings, but you should NOT rely on default choice. It may change in future releases of ALGLIB without notice, and no one can guarantee that new solver will be able to solve your problem with default settings. From the other side, if you choose solver explicitly, you can be pretty sure that it will work with new ALGLIB releases. In the current release following solvers can be used: * AGS solver (activated with MinNSSetAlgoAGS() function) 2. User adds boundary and/or linear and/or nonlinear constraints by means of calling one of the following functions: a) MinNSSetBC() for boundary constraints b) MinNSSetLC() for linear constraints c) MinNSSetNLC() for nonlinear constraints You may combine (a), (b) and (c) in one optimization problem. 3. User sets scale of the variables with MinNSSetScale() function. It is VERY important to set scale of the variables, because nonlinearly constrained problems are hard to solve when variables are badly scaled. 4. User sets stopping conditions with MinNSSetCond(). 5. Finally, user calls MinNSOptimize() function which takes algorithm state and pointer (delegate, etc) to callback function which calculates F/G/H. 7. User calls MinNSResults() to get solution 8. Optionally user may call MinNSRestartFrom() to solve another problem with same N but another starting point. MinNSRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state NOTE: minnscreatef() function may be used if you do not have analytic gradient. This function creates solver which uses numerical differentiation with user-specified step. 
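To illustrate the workflow above with a sketch (the nonsmooth target
f=|x0|+2|x1|, the callback shape and the particular AGS/stopping settings
are assumptions of this example; see minnssetalgoags(), minnssetcond() and
minnsoptimize() for the authoritative descriptions):

    // assumes: #include "optimization.h", #include <cmath> and using namespace alglib;
    void nsfunc_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    {
        // fi[0] is the target; no nonlinear constraints in this example
        fi[0]     = fabs(x[0]) + 2*fabs(x[1]);
        jac[0][0] = x[0]>=0 ? 1.0 : -1.0;    // tie at x=0 resolved arbitrarily
        jac[0][1] = x[1]>=0 ? 2.0 : -2.0;
    }

    // somewhere in the calling code:
    real_1d_array x0 = "[1,1]";
    real_1d_array s  = "[1,1]";
    real_1d_array xsol;
    minnsstate    state;
    minnsreport   rep;
    minnscreate(x0, state);
    minnssetalgoags(state, 0.1, 0.0);     // AGS solver, sampling radius 0.1, zero penalty (no nonlinear constraints)
    minnssetscale(state, s);
    minnssetcond(state, 1.0e-5, 0);       // stop when sampling radius shrinks below 1e-5
    minnsoptimize(state, nsfunc_jac, NULL, NULL);
    minnsresults(state, xsol, rep);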
-- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnscreate(const real_1d_array &x, minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnscreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* Version of minnscreatef() which uses numerical differentiation. I.e., you do not have to calculate derivatives yourself. However, this version needs 2N times more function evaluations. 2-point differentiation formula is used, because more precise 4-point formula is unstable when used on non-smooth functions. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, DiffStep>0. Algorithm performs numerical differentiation with step for I-th variable being equal to DiffStep*S[I] (here S[] is a scale vector, set by minnssetscale() function). Do not use too small steps, because it may lead to catastrophic cancellation during intermediate calculations. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnscreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Version of minnscreatef() which uses numerical differentiation. I.e., you do not have to calculate derivatives yourself. However, this version needs 2N times more function evaluations. 2-point differentiation formula is used, because more precise 4-point formula is unstable when used on non-smooth functions. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, DiffStep>0. 
Algorithm performs numerical differentiation with step for I-th variable being equal to DiffStep*S[I] (here S[] is a scale vector, set by minnssetscale() function). Do not use too small steps, because it may lead to catastrophic cancellation during intermediate calculations. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnscreatef(const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnscreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets boundary constraints. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with minnsrestartfrom(). INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: AGS solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints, even when numerical differentiation is used (algorithm adjusts nodes according to boundary constraints) -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with minnsrestartfrom(). INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. 
CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about current sampling radius in magnitude during optimization, about EpsX in the solution) due to use of penalty method to handle constraints. * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points). -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets linear constraints. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with minnsrestartfrom(). INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about current sampling radius in magnitude during optimization, about EpsX in the solution) due to use of penalty method to handle constraints. * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. 
If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points). -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (c.rows()!=ct.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minnssetlc': looks like one of arguments has wrong size"); k = c.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetlc(const_cast(state.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets nonlinear constraints. In fact, this function sets NUMBER of nonlinear constraints. Constraints themselves (constraint functions) are passed to minnsoptimize() method. This method requires user-defined vector function F[] and its Jacobian J[], where: * first component of F[] and first row of Jacobian J[] correspond to function being minimized * next NLEC components of F[] (and rows of J) correspond to nonlinear equality constraints G_i(x)=0 * next NLIC components of F[] (and rows of J) correspond to nonlinear inequality constraints H_i(x)<=0 NOTE: you may combine nonlinear constraints with linear/boundary ones. If your problem has mixed constraints, you may explicitly specify some of them as linear ones. It may help the optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. NLEC - number of Non-Linear Equality Constraints (NLEC), >=0 NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0 NOTE 1: nonlinear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of the feasible area! NOTE 2: algorithm scales variables according to scale specified by minnssetscale() function, so it can handle problems with badly scaled variables (as long as we KNOW their scales). However, there is no way to automatically scale nonlinear constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may ruin convergence. Solving problem with constraint "1000*G0(x)=0" is NOT the same as solving it with constraint "0.001*G0(x)=0". It means that YOU are the one who is responsible for correct scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you to scale nonlinear constraints in such a way that I-th component of dG/dX (or dH/dx) has approximately unit magnitude (for problems with unit scale) or has magnitude approximately equal to 1/S[i] (where S is a scale set by minnssetscale() function). NOTE 3: nonlinear constraints are always hard to handle, no matter what algorithm you try to use. Even basic box/linear constraints modify function curvature by adding valleys and ridges. However, nonlinear constraints add valleys which are very hard to follow due to their "curved" nature. 
It means that optimization with single nonlinear constraint may be significantly slower than optimization with multiple linear ones. It is normal situation, and we recommend you to carefully choose Rho parameter of minnssetalgoags(), because too large value may slow down convergence. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetnlc(const_cast(state.c_ptr()), nlec, nlic, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets stopping conditions for iterations of optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The AGS solver finishes its work if on k+1-th iteration sampling radius decreases below EpsX. MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection. We do not recommend you to rely on default choice in production code. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetcond(const_cast(state.c_ptr()), epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for NLC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. 
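EXAMPLE (illustrative sketch only, not part of ALGLIB; the magnitudes are made up):
if the first variable typically changes by about 1.0 and the second by about 1.0E+6,
one would pass

    real_1d_array s = "[1.0,1.0e+6]";
    minnssetscale(state, s);   // "state" is a previously created minnsstate

so that stopping tests and (if used) numerical differentiation steps are performed
in properly scaled units.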
-- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetscale(const minnsstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function tells MinNS unit to use AGS (adaptive gradient sampling) algorithm for nonsmooth constrained optimization. This algorithm is a slight modification of one described in "An Adaptive Gradient Sampling Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que. This optimizer has the following benefits and drawbacks: + robustness; it can be used with nonsmooth and nonconvex functions. + relatively easy tuning; most of the metaparameters are easy to select. - it has convergence of steepest descent, slower than CG/LBFGS. - each iteration involves evaluation of ~2N gradient values and solution of 2Nx2N quadratic programming problem, which limits applicability of the algorithm to small-scale problems (up to 50-100). IMPORTANT: this algorithm has convergence guarantees, i.e. it will steadily move towards some stationary point of the function. However, "stationary point" does not always mean "solution". Nonsmooth problems often have "flat spots", i.e. areas where the function does not change at all. Such "flat spots" are stationary points by definition, and the algorithm may get caught there. Nonsmooth CONVEX tasks are not prone to this problem. Say, if your function has form f()=MAX(f0,f1,...), and f_i are convex, then f() is convex too and you have guaranteed convergence to solution. INPUT PARAMETERS: State - structure which stores algorithm state Radius - initial sampling radius, >=0. Internally multiplied by vector of per-variable scales specified by minnssetscale(). You should select relatively large sampling radius, roughly proportional to scaled length of the first steps of the algorithm. Something close to 0.1 in magnitude should be good for most problems. AGS solver can automatically decrease radius, so too large radius is not a problem (assuming that you won't choose such a large radius that the algorithm will sample the function at points too far away, where the gradient value is irrelevant). Too small radius won't cause algorithm to fail, but it may slow down the algorithm (it may have to perform too short steps). Penalty - penalty coefficient for nonlinear constraints: * for problem with nonlinear constraints should be some problem-specific positive value, large enough that the penalty term changes shape of the function. Starting from some problem-specific value the penalty coefficient becomes large enough to exactly enforce nonlinear constraints; larger values do not improve precision. Increasing it too much may slow down convergence, so you should choose it carefully. * can be zero for problems WITHOUT nonlinear constraints (i.e. 
for unconstrained ones or ones with just box or linear constraints) * if you specify zero value for problem with at least one nonlinear constraint, algorithm will terminate with error code -1. ALGORITHM OUTLINE The very basic outline of unconstrained AGS algorithm is given below: 0. If sampling radius is below EpsX or we performed more then MaxIts iterations - STOP. 1. sample O(N) gradient values at random locations around current point; informally speaking, this sample is an implicit piecewise linear model of the function, although algorithm formulation does not mention that explicitly 2. solve quadratic programming problem in order to find descent direction 3. if QP solver tells us that we are near solution, decrease sampling radius and move to (0) 4. perform backtracking line search 5. after moving to new point, goto (0) As for the constraints: * box constraints are handled exactly by modification of the function being minimized * linear/nonlinear constraints are handled by adding L1 penalty. Because our solver can handle nonsmoothness, we can use L1 penalty function, which is an exact one (i.e. exact solution is returned under such penalty). * penalty coefficient for linear constraints is chosen automatically; however, penalty coefficient for nonlinear constraints must be specified by user. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetalgoags(const minnsstate &state, const double radius, const double penalty, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetalgoags(const_cast(state.c_ptr()), radius, penalty, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to minnsoptimize(). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnssetxrep(const minnsstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnssetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. 
It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsrequesttermination(const minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnsrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ bool minnsiteration(const minnsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minnsiteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minnsoptimize(minnsstate &state, void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(fvec!=NULL, "ALGLIB: error in 'minnsoptimize()' (fvec is NULL)", &_alglib_env_state); while( alglib_impl::minnsiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfi ) { fvec(state.x, state.fi, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, 
state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minnsoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minnsoptimize(minnsstate &state, void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(jac!=NULL, "ALGLIB: error in 'minnsoptimize()' (jac is NULL)", &_alglib_env_state); while( alglib_impl::minnsiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfij ) { jac(state.x, state.fi, state.j, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minnsoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* MinNS results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report. You should check Rep.TerminationType in order to distinguish successful termination from unsuccessful one: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 box constraints are inconsistent * -1 inconsistent parameters were passed: * penalty parameter for minnssetalgoags() is zero, but we have nonlinear constraints set by minnssetnlc() * 2 sampling radius decreased below epsx * 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. * 8 User requested termination via minnsrequesttermination() -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnsresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Buffered implementation of minnsresults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. 
It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnsresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. X - new starting point. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsrestartfrom(const minnsstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minnsrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* *************************************************************************/ _minasastate_owner::_minasastate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minasastate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minasastate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minasastate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minasastate)); alglib_impl::_minasastate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minasastate_owner::_minasastate_owner(const _minasastate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minasastate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if 
!defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minasastate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minasastate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minasastate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minasastate)); alglib_impl::_minasastate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minasastate_owner& _minasastate_owner::operator=(const _minasastate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minasastate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minasastate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minasastate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minasastate)); alglib_impl::_minasastate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minasastate_owner::~_minasastate_owner() { if( p_struct!=NULL ) { alglib_impl::_minasastate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minasastate* _minasastate_owner::c_ptr() { return p_struct; } alglib_impl::minasastate* _minasastate_owner::c_ptr() const { return const_cast(p_struct); } minasastate::minasastate() : _minasastate_owner() ,needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minasastate::minasastate(const minasastate &rhs):_minasastate_owner(rhs) ,needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } minasastate& minasastate::operator=(const minasastate &rhs) { if( this==&rhs ) return *this; _minasastate_owner::operator=(rhs); return *this; } minasastate::~minasastate() { } /************************************************************************* *************************************************************************/ _minasareport_owner::_minasareport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minasareport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minasareport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minasareport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minasareport)); alglib_impl::_minasareport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minasareport_owner::_minasareport_owner(const _minasareport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minasareport_destroy(p_struct); 
alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minasareport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minasareport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minasareport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minasareport)); alglib_impl::_minasareport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minasareport_owner& _minasareport_owner::operator=(const _minasareport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minasareport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minasareport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minasareport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minasareport)); alglib_impl::_minasareport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minasareport_owner::~_minasareport_owner() { if( p_struct!=NULL ) { alglib_impl::_minasareport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minasareport* _minasareport_owner::c_ptr() { return p_struct; } alglib_impl::minasareport* _minasareport_owner::c_ptr() const { return const_cast(p_struct); } minasareport::minasareport() : _minasareport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype),activeconstraints(p_struct->activeconstraints) { } minasareport::minasareport(const minasareport &rhs):_minasareport_owner(rhs) ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype),activeconstraints(p_struct->activeconstraints) { } minasareport& minasareport::operator=(const minasareport &rhs) { if( this==&rhs ) return *this; _minasareport_owner::operator=(rhs); return *this; } minasareport::~minasareport() { } /************************************************************************* Obsolete function, use MinLBFGSSetPrecDefault() instead. 
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetdefaultpreconditioner(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlbfgssetcholeskypreconditioner(const_cast(state.c_ptr()), const_cast(p.c_ptr()), isupper, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This is obsolete function which was used by previous version of the BLEIC optimizer. It does nothing in the current version of BLEIC. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbarrierwidth(const minbleicstate &state, const double mu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetbarrierwidth(const_cast(state.c_ptr()), mu, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This is obsolete function which was used by previous version of the BLEIC optimizer. It does nothing in the current version of BLEIC. 
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minbleicsetbarrierdecay(const_cast(state.c_ptr()), mudecay, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasacreate(n, const_cast(x.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; if( (x.length()!=bndl.length()) || (x.length()!=bndu.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minasacreate': looks like one of arguments has wrong size"); n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasacreate(n, const_cast(x.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. 
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasasetcond(const_cast(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetxrep(const minasastate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasasetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetalgorithm(const minasastate &state, const ae_int_t algotype, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasasetalgorithm(const_cast(state.c_ptr()), algotype, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. 
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetstpmax(const minasastate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasasetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ bool minasaiteration(const minasastate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minasaiteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minasaoptimize(minasastate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minasaoptimize()' (grad is NULL)", &_alglib_env_state); while( alglib_impl::minasaiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minasaoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. 
-- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasaresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasaresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minasarestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This object stores state of the nonlinear CG optimizer. You should use ALGLIB functions to work with this object. 
*************************************************************************/ _mincgstate_owner::_mincgstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_mincgstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::mincgstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mincgstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::mincgstate)); alglib_impl::_mincgstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _mincgstate_owner::_mincgstate_owner(const _mincgstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_mincgstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mincgstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::mincgstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mincgstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::mincgstate)); alglib_impl::_mincgstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _mincgstate_owner& _mincgstate_owner::operator=(const _mincgstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mincgstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mincgstate assignment constructor failure (source is not initialized)", &_state); alglib_impl::_mincgstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::mincgstate)); alglib_impl::_mincgstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _mincgstate_owner::~_mincgstate_owner() { if( p_struct!=NULL ) { alglib_impl::_mincgstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::mincgstate* _mincgstate_owner::c_ptr() { return p_struct; } alglib_impl::mincgstate* _mincgstate_owner::c_ptr() const { return const_cast(p_struct); } mincgstate::mincgstate() : _mincgstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } mincgstate::mincgstate(const mincgstate &rhs):_mincgstate_owner(rhs) ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),f(p_struct->f),g(&p_struct->g),x(&p_struct->x) { } mincgstate& mincgstate::operator=(const mincgstate &rhs) { if( this==&rhs ) return *this; _mincgstate_owner::operator=(rhs); return *this; } mincgstate::~mincgstate() { } 
/************************************************************************* This structure stores optimization report: * IterationsCount total number of inner iterations * NFEV number of gradient evaluations * TerminationType termination type (see below) TERMINATION CODES TerminationType field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG 5 MaxIts steps was taken 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. 8 terminated by user who called mincgrequesttermination(). X contains point which was "current accepted" when termination request was submitted. Other fields of this structure are not documented and should not be used! *************************************************************************/ _mincgreport_owner::_mincgreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_mincgreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::mincgreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mincgreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::mincgreport)); alglib_impl::_mincgreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _mincgreport_owner::_mincgreport_owner(const _mincgreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_mincgreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mincgreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::mincgreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mincgreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::mincgreport)); alglib_impl::_mincgreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _mincgreport_owner& _mincgreport_owner::operator=(const _mincgreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mincgreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mincgreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_mincgreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::mincgreport)); alglib_impl::_mincgreport_init_copy(p_struct, 
const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _mincgreport_owner::~_mincgreport_owner() { if( p_struct!=NULL ) { alglib_impl::_mincgreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::mincgreport* _mincgreport_owner::c_ptr() { return p_struct; } alglib_impl::mincgreport* _mincgreport_owner::c_ptr() const { return const_cast(p_struct); } mincgreport::mincgreport() : _mincgreport_owner() ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype) { } mincgreport::mincgreport(const mincgreport &rhs):_mincgreport_owner(rhs) ,iterationscount(p_struct->iterationscount),nfev(p_struct->nfev),terminationtype(p_struct->terminationtype) { } mincgreport& mincgreport::operator=(const mincgreport &rhs) { if( this==&rhs ) return *this; _mincgreport_owner::operator=(rhs); return *this; } mincgreport::~mincgreport() { } /************************************************************************* NONLINEAR CONJUGATE GRADIENT METHOD DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using one of the nonlinear conjugate gradient methods. These CG methods are globally convergent (even on non-convex functions) as long as grad(f) is Lipschitz continuous in a some neighborhood of the L = { x : f(x)<=f(x0) }. REQUIREMENTS: Algorithm will request following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinCGCreate() call 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and other functions 3. User calls MinCGOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinCGResults() to get solution 5. Optionally, user may call MinCGRestartFrom() to solve another problem with same N but another starting point and/or another function. MinCGRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgcreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* NONLINEAR CONJUGATE GRADIENT METHOD DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using one of the nonlinear conjugate gradient methods. These CG methods are globally convergent (even on non-convex functions) as long as grad(f) is Lipschitz continuous in a some neighborhood of the L = { x : f(x)<=f(x0) }. 
REQUIREMENTS: Algorithm will request following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinCGCreate() call 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and other functions 3. User calls MinCGOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinCGResults() to get solution 5. Optionally, user may call MinCGRestartFrom() to solve another problem with same N but another starting point and/or another function. MinCGRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void mincgcreate(const real_1d_array &x, mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgcreate(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* The subroutine is finite difference variant of MinCGCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinCGCreate() in order to get more information about creation of CG optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinCGSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. L-BFGS needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgcreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* The subroutine is finite difference variant of MinCGCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinCGCreate() in order to get more information about creation of CG optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinCGSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. L-BFGS needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. 
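For illustration, a sketch of the finite-difference setup described above
(the target my_func, the wrapper solve_with_fd and the chosen step are
assumptions; DiffStep=1.0E-6 follows the recommendation of note 3):

    // function-only callback: no analytic gradient is supplied
    void my_func(const real_1d_array &x, double &func, void *ptr)
    {
        func = (x[0]-3.0)*(x[0]-3.0) + x[1]*x[1];
    }

    void solve_with_fd()
    {
        real_1d_array x = "[0,0]";
        mincgstate state;
        mincgreport rep;
        mincgcreatef(x, 1.0e-6, state);   // numerical differentiation, DiffStep=1.0E-6
        mincgoptimize(state, my_func);    // overload which needs only function values
        mincgresults(state, x, rep);
    }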
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgcreatef(n, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets stopping conditions for CG optimization algorithm. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<=EpsG is satisfied, where: * |.| means Euclidean norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinCGSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinCGSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (small EpsX). -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetcond(const_cast(state.c_ptr()), epsg, epsf, epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for CG optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of CG optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the CG too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinCGSetPrec...() functions. There is special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want.
But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void mincgsetscale(const mincgstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinCGOptimize(). -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetxrep(const mincgstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetxrep(const_cast(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets CG algorithm. 
INPUT PARAMETERS: State - structure which stores algorithm state CGType - algorithm type: * -1 automatic selection of the best algorithm * 0 DY (Dai and Yuan) algorithm * 1 Hybrid DY-HS algorithm -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetcgtype(const_cast(state.c_ptr()), cgtype, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetstpmax(const mincgstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetstpmax(const_cast(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function allows to suggest initial step length to the CG algorithm. Suggested step length is used as starting point for the line search. It can be useful when you have badly scaled problem, i.e. when ||grad|| (which is used as initial estimate for the first step) is many orders of magnitude different from the desired step. Line search may fail on such problems without good estimate of initial step length. Imagine, for example, problem with ||grad||=10^50 and desired step equal to 0.1 Line search function will use 10^50 as initial step, then it will decrease step length by 2 (up to 20 attempts) and will get 10^44, which is still too large. This function allows us to tell than line search should be started from some moderate step length, like 1.0, so algorithm will be able to detect desired step length in a several searches. Default behavior (when no step is suggested) is to use preconditioner, if it is available, to generate initial estimate of step length. This function influences only first iteration of algorithm. 
It should be called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. Suggested step is ignored if you have preconditioner. INPUT PARAMETERS: State - structure used to store algorithm state. Stp - initial estimate of the step length. Can be zero (no estimate). -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void mincgsuggeststep(const mincgstate &state, const double stp, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsuggeststep(const_cast(state.c_ptr()), stp, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecdefault(const mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetprecdefault(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: D[i] should be positive. Exception will be thrown otherwise. NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 
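For illustration, a short sketch (the diagonal values are made-up numbers for a
two-variable problem, and state is an already created mincgstate; any positive
values of your own would do):

    real_1d_array d = "[100.0, 0.5]";   // diagonal of approximate Hessian, NOT its inverse
    mincgsetprecdiag(state, d);         // every D[i] must be strictly positive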
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetprecdiag(const_cast(state.c_ptr()), const_cast(d.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinCGSetScale() call (before or after MinCGSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecscale(const mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgsetprecscale(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. 
See below for functions which provide better documented API *************************************************************************/ bool mincgiteration(const mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::mincgiteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void mincgoptimize(mincgstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'mincgoptimize()' (func is NULL)", &_alglib_env_state); while( alglib_impl::mincgiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void mincgoptimize(mincgstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'mincgoptimize()' (grad is NULL)", &_alglib_env_state); while( alglib_impl::mincgiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. 
Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with mincgoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with mincgsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void mincgoptguardgradient(const mincgstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. 
Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to detect errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardsmoothness(const mincgstate &state, const ae_int_t level, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardsmoothness(const_cast(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth.
It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to detect errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void mincgoptguardsmoothness(const mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t level; level = 1; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardsmoothness(const_cast(state.c_ptr()), level, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over.
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * mincgoptguardgradient() for gradient verification * mincgoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * mincgoptguardnonc1test0results() * mincgoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. 
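For illustration, a sketch of a typical OptGuard-enabled session built from
the calls documented in this section. It assumes the optimizer state, the
solution buffer x, the report rep and the callback my_grad were set up as
usual; the test step 0.001 is an assumption, not a recommendation, and
printf() requires stdio.h:

    optguardreport ogrep;
    mincgoptguardgradient(state, 0.001);    // verify user-supplied gradient
    mincgoptguardsmoothness(state);         // enable C0/C1 monitoring
    mincgoptimize(state, my_grad);
    mincgresults(state, x, rep);
    mincgoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
        printf("suspicious gradient component: %d\n", (int)ogrep.badgradvidx);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
        printf("target may be discontinuous or nonsmooth\n");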
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardresults(const mincgstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardnonc1test0results(const mincgstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardnonc1test0results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. 
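For example, the logged line search can be dumped for plotting as follows (a
sketch; it assumes the optimizer state is available and printf() is used only
for illustration):

    optguardnonc1test1report strrep, lngrep;
    mincgoptguardnonc1test1results(state, strrep, lngrep);
    if( strrep.positive )
        for(int i=0; i<strrep.stp.length(); i++)
            printf("%12.6f %12.6f\n", strrep.stp[i], strrep.g[i]);   // step length vs gradient component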
========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardnonc1test1results(const mincgstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgoptguardnonc1test1results(const_cast(state.c_ptr()), const_cast(strrep.c_ptr()), const_cast(lngrep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Conjugate gradient results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report: * Rep.TerminationType completetion code: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -7 gradient verification failed. See MinCGSetGradientCheck() for more information. * 1 relative function improvement is no more than EpsF. * 2 relative step is no more than EpsX. * 4 gradient norm is no more than EpsG * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible, we return best X found so far * 8 terminated by user * Rep.IterationsCount contains iterations count * NFEV countains number of function calculations -- ALGLIB -- Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Conjugate gradient results Buffered implementation of MinCGResults(), which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. 
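For illustration, a sketch of the buffered pattern (the outer loop, the count
nproblems, the hypothetical starting_points array and the callback my_grad are
assumptions):

    real_1d_array xsol;   // reused between calls; resized only if too small
    mincgreport rep;
    for(int k=0; k<nproblems; k++)
    {
        mincgrestartfrom(state, starting_points[k]);
        mincgoptimize(state, my_grad);
        mincgresultsbuf(state, xsol, rep);   // no reallocation once xsol is large enough
    }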
-- ALGLIB -- Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts CG algorithm from new point. All optimization parameters are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used to store algorithm state. X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void mincgrestartfrom(const mincgstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
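For illustration, a sketch of submitting the termination request from a
user-supplied report callback (the threshold and the wiring of ptr are
assumptions; it presumes mincgsetxrep(state,true) was called and &state was
passed as ptr to mincgoptimize()):

    void my_rep(const real_1d_array &x, double func, void *ptr)
    {
        mincgstate *s = (mincgstate*)ptr;
        if( func<1.0e-3 )                   // hypothetical stopping rule
            mincgrequesttermination(*s);    // optimizer will finish with code 8
    }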
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void mincgrequesttermination(const mincgstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::mincgrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Levenberg-Marquardt optimizer. This structure should be created using one of the MinLMCreate???() functions. You should not access its fields directly; use ALGLIB functions to work with it. *************************************************************************/ _minlmstate_owner::_minlmstate_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlmstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlmstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlmstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlmstate)); alglib_impl::_minlmstate_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlmstate_owner::_minlmstate_owner(const _minlmstate_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlmstate_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlmstate copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlmstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlmstate), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlmstate)); alglib_impl::_minlmstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlmstate_owner& _minlmstate_owner::operator=(const _minlmstate_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlmstate assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlmstate assignment constructor failure (source is not initialized)", &_state); 
alglib_impl::_minlmstate_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlmstate)); alglib_impl::_minlmstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlmstate_owner::~_minlmstate_owner() { if( p_struct!=NULL ) { alglib_impl::_minlmstate_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlmstate* _minlmstate_owner::c_ptr() { return p_struct; } alglib_impl::minlmstate* _minlmstate_owner::c_ptr() const { return const_cast(p_struct); } minlmstate::minlmstate() : _minlmstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),needfgh(p_struct->needfgh),needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),g(&p_struct->g),h(&p_struct->h),j(&p_struct->j),x(&p_struct->x) { } minlmstate::minlmstate(const minlmstate &rhs):_minlmstate_owner(rhs) ,needf(p_struct->needf),needfg(p_struct->needfg),needfgh(p_struct->needfgh),needfi(p_struct->needfi),needfij(p_struct->needfij),xupdated(p_struct->xupdated),f(p_struct->f),fi(&p_struct->fi),g(&p_struct->g),h(&p_struct->h),j(&p_struct->j),x(&p_struct->x) { } minlmstate& minlmstate::operator=(const minlmstate &rhs) { if( this==&rhs ) return *this; _minlmstate_owner::operator=(rhs); return *this; } minlmstate::~minlmstate() { } /************************************************************************* Optimization report, filled by MinLMResults() function FIELDS: * TerminationType, completetion code: * -8 optimizer detected NAN/INF values either in the function itself, or in its Jacobian * -5 inappropriate solver was used: * solver created with minlmcreatefgh() used on problem with general linear constraints (set with minlmsetlc() call). * -3 constraints are inconsistent * 2 relative step is no more than EpsX. * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible * 8 terminated by user who called MinLMRequestTermination(). X contains point which was "current accepted" when termination request was submitted. 
* IterationsCount, contains iterations count * NFunc, number of function calculations * NJac, number of Jacobi matrix calculations * NGrad, number of gradient calculations * NHess, number of Hessian calculations * NCholesky, number of Cholesky decomposition calculations *************************************************************************/ _minlmreport_owner::_minlmreport_owner() { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlmreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; p_struct = (alglib_impl::minlmreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlmreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlmreport)); alglib_impl::_minlmreport_init(p_struct, &_state, ae_false); ae_state_clear(&_state); } _minlmreport_owner::_minlmreport_owner(const _minlmreport_owner &rhs) { jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { if( p_struct!=NULL ) { alglib_impl::_minlmreport_destroy(p_struct); alglib_impl::ae_free(p_struct); } p_struct = NULL; #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); p_struct = NULL; alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlmreport copy constructor failure (source is not initialized)", &_state); p_struct = (alglib_impl::minlmreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::minlmreport), &_state); memset(p_struct, 0, sizeof(alglib_impl::minlmreport)); alglib_impl::_minlmreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); } _minlmreport_owner& _minlmreport_owner::operator=(const _minlmreport_owner &rhs) { if( this==&rhs ) return *this; jmp_buf _break_jump; alglib_impl::ae_state _state; alglib_impl::ae_state_init(&_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return *this; #endif } alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: minlmreport assignment constructor failure (destination is not initialized)", &_state); alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: minlmreport assignment constructor failure (source is not initialized)", &_state); alglib_impl::_minlmreport_destroy(p_struct); memset(p_struct, 0, sizeof(alglib_impl::minlmreport)); alglib_impl::_minlmreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); ae_state_clear(&_state); return *this; } _minlmreport_owner::~_minlmreport_owner() { if( p_struct!=NULL ) { alglib_impl::_minlmreport_destroy(p_struct); ae_free(p_struct); } } alglib_impl::minlmreport* _minlmreport_owner::c_ptr() { return p_struct; } alglib_impl::minlmreport* _minlmreport_owner::c_ptr() const { return const_cast(p_struct); } minlmreport::minlmreport() : _minlmreport_owner() ,iterationscount(p_struct->iterationscount),terminationtype(p_struct->terminationtype),nfunc(p_struct->nfunc),njac(p_struct->njac),ngrad(p_struct->ngrad),nhess(p_struct->nhess),ncholesky(p_struct->ncholesky) { } minlmreport::minlmreport(const 
minlmreport &rhs):_minlmreport_owner(rhs) ,iterationscount(p_struct->iterationscount),terminationtype(p_struct->terminationtype),nfunc(p_struct->nfunc),njac(p_struct->njac),ngrad(p_struct->ngrad),nhess(p_struct->nhess),ncholesky(p_struct->ncholesky) { } minlmreport& minlmreport::operator=(const minlmreport &rhs) { if( this==&rhs ) return *this; _minlmreport_owner::operator=(rhs); return *this; } minlmreport::~minlmreport() { } /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] and Jacobian of f[]. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X * function vector f[] and Jacobian of f[] (simultaneously) at given point There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() and jac() callbacks. First one is used to calculate f[] at given point, second one calculates f[] and Jacobian df[i]/dx[j]. You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not provide Jacobian), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateVJ() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
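EXAMPLE (a minimal sketch of the VJ protocol; the callback names below are
illustrative, and the optional rep/ptr arguments of MinLMOptimize() are
assumed to default to NULL as in the published C++ headers):

    // f0 = 10*(x0+3)^2, f1 = (x1-3)^2, so F(x) = f0^2+f1^2 is minimal at (-3,+3)
    void demo_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
    }
    void demo_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
        jac[0][0] = 20*(x[0]+3);   // df0/dx0
        jac[0][1] = 0;             // df0/dx1
        jac[1][0] = 0;             // df1/dx0
        jac[1][1] = 2*(x[1]-3);    // df1/dx1
    }
    ...
    real_1d_array x = "[0,0]";
    minlmstate state;
    minlmreport rep;
    minlmcreatevj(2, x, state);        // M=2 functions, N deduced from x
    minlmsetcond(state, 1.0e-10, 0);   // EpsX, MaxIts
    minlmoptimize(state, demo_fvec, demo_jac);
    minlmresults(state, x, rep);       // x should now be close to [-3,+3]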
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatevj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] and Jacobian of f[]. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X * function vector f[] and Jacobian of f[] (simultaneously) at given point There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() and jac() callbacks. First one is used to calculate f[] at given point, second one calculates f[] and Jacobian df[i]/dx[j]. You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not provide Jacobian), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateVJ() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatevj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] only. Finite differences are used to calculate Jacobian. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() callback. You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not accept function vector), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateV() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state See also MinLMIteration, MinLMResults. NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
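EXAMPLE (a minimal sketch of the V protocol with numerical differentiation;
demo_fvec is an illustrative callback like the one sketched for
MinLMCreateVJ() above, and defaulted optional arguments are assumed):

    real_1d_array x = "[0,0]";
    double diffstep = 0.0001;            // DiffStep used for the numerical Jacobian
    minlmstate state;
    minlmreport rep;
    minlmcreatev(2, x, diffstep, state); // M=2 functions, N deduced from x
    minlmsetcond(state, 1.0e-10, 0);
    minlmoptimize(state, demo_fvec);     // only f[] is requested; Jacobian is approximated
    minlmresults(state, x, rep);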
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatev(n, m, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] only. Finite differences are used to calculate Jacobian. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() callback. You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not accept function vector), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateV() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state See also MinLMIteration, MinLMResults. NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatev(n, m, const_cast(x.c_ptr()), diffstep, const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION DESCRIPTION: This function is used to find minimum of general form (not "sum-of- -squares") function F = F(x[0], ..., x[n-1]) using its gradient and Hessian. Levenberg-Marquardt modification with L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization after each Levenberg-Marquardt step is used. REQUIREMENTS: This algorithm will request following information during its operation: * function value F at given point X * F and gradient G (simultaneously) at given point X * F, G and Hessian H (simultaneously) at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts func(), grad() and hess() function pointers. First pointer is used to calculate F at given point, second one calculates F(x) and grad F(x), third one calculates F(x), grad F(x), hess F(x). You can try to initialize MinLMState structure with FGH-function and then use incorrect version of MinLMOptimize() (for example, version which does not provide Hessian matrix), but it will lead to exception being thrown after first attempt to calculate Hessian. USAGE: 1. User initializes algorithm state with MinLMCreateFGH() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and pointers (delegates, etc.) to callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
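EXAMPLE (a minimal sketch of the FGH protocol on F(x) = (x0+3)^2 + (x1-3)^2;
callback names are illustrative and defaulted optional arguments are assumed):

    void demo_func(const real_1d_array &x, double &func, void *ptr)
    {
        func = (x[0]+3)*(x[0]+3) + (x[1]-3)*(x[1]-3);
    }
    void demo_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        func = (x[0]+3)*(x[0]+3) + (x[1]-3)*(x[1]-3);
        grad[0] = 2*(x[0]+3);
        grad[1] = 2*(x[1]-3);
    }
    void demo_hess(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr)
    {
        demo_grad(x, func, grad, ptr);
        hess[0][0] = 2;  hess[0][1] = 0;   // constant Hessian of this quadratic
        hess[1][0] = 0;  hess[1][1] = 2;
    }
    ...
    real_1d_array x = "[0,0]";
    minlmstate state;
    minlmreport rep;
    minlmcreatefgh(x, state);            // N deduced from x
    minlmsetcond(state, 1.0e-10, 0);
    minlmoptimize(state, demo_func, demo_grad, demo_hess);
    minlmresults(state, x, rep);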
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefgh(n, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION DESCRIPTION: This function is used to find minimum of general form (not "sum-of- -squares") function F = F(x[0], ..., x[n-1]) using its gradient and Hessian. Levenberg-Marquardt modification with L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization after each Levenberg-Marquardt step is used. REQUIREMENTS: This algorithm will request following information during its operation: * function value F at given point X * F and gradient G (simultaneously) at given point X * F, G and Hessian H (simultaneously) at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts func(), grad() and hess() function pointers. First pointer is used to calculate F at given point, second one calculates F(x) and grad F(x), third one calculates F(x), grad F(x), hess F(x). You can try to initialize MinLMState structure with FGH-function and then use incorrect version of MinLMOptimize() (for example, version which does not provide Hessian matrix), but it will lead to exception being thrown after first attempt to calculate Hessian. USAGE: 1. User initializes algorithm state with MinLMCreateFGH() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and pointers (delegates, etc.) to callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N but another starting point and/or another function. MinLMRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - dimension, N>1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. 
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatefgh(const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefgh(n, const_cast<alglib_impl::ae_vector*>(x.c_ptr()), const_cast<alglib_impl::minlmstate*>(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function sets stopping conditions for Levenberg-Marquardt optimization algorithm. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinLMSetScale() Recommended values: 1E-9 ... 1E-12. MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Only Levenberg-Marquardt iterations are counted (L-BFGS/CG iterations are NOT counted because their cost is very low compared to that of LM). Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (small EpsX). NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM is a second-order method, it performs very precise steps anyway. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetcond(const_cast<alglib_impl::minlmstate*>(state.c_ptr()), epsx, maxits, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS iterations are reported.
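EXAMPLE (a minimal sketch; the callback name is illustrative and printf from
<cstdio> is assumed to be available; demo_fvec/demo_jac are the callbacks
sketched earlier):

    void demo_rep(const real_1d_array &x, double func, void *ptr)
    {
        printf("report: F = %.6e\n", func);   // called on every accepted iterate
    }
    ...
    minlmsetxrep(state, true);                // turn reporting on
    minlmoptimize(state, demo_fvec, demo_jac, demo_rep, NULL);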
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetxrep(const minlmstate &state, const bool needxrep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetxrep(const_cast<alglib_impl::minlmstate*>(state.c_ptr()), needxrep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the point x+stp*d. NOTE: non-zero StpMax leads to moderate performance degradation because intermediate step of preconditioned L-BFGS optimization is incompatible with limits on step size. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetstpmax(const minlmstate &state, const double stpmax, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetstpmax(const_cast<alglib_impl::minlmstate*>(state.c_ptr()), stpmax, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets scaling coefficients for LM optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Generally, scale is NOT considered to be a form of preconditioner. But LM optimizer is unique in that it uses scaling matrix both in the stopping condition tests and as Marquardt damping factor. Proper scaling is very important for the algorithm performance. It is less important for the quality of results, but still has some influence (it is easier to converge when variables are properly scaled, so premature stopping is possible when very badly scaled variables are combined with relaxed stopping conditions). INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter.
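EXAMPLE (a minimal sketch; the magnitudes are hypothetical):

    // x[0] is of order 1E-6 and x[1] is of order 1E+4, so unit scaling would
    // make the EpsX test and the damping factor badly balanced
    real_1d_array s = "[1.0e-6, 1.0e+4]";
    minlmsetscale(state, s);
    minlmsetcond(state, 1.0e-10, 0);   // EpsX is applied to the scaled step dx[i]/s[i]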
-- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetscale(const minlmstate &state, const real_1d_array &s, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetscale(const_cast(state.c_ptr()), const_cast(s.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets boundary constraints for LM optimizer Boundary constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another SetBC() call. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF (latter is recommended because it will allow solver to use better algorithm). BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF (latter is recommended because it will allow solver to use better algorithm). NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints or at its boundary -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetbc(const_cast(state.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets general linear constraints for LM optimizer Linear constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another minlmsetlc() call. INPUT PARAMETERS: State - structure stores algorithm state C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. 
CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT IMPORTANT: if you have linear constraints, it is strongly recommended to set scale of variables with minlmsetscale(). QP solver which is used to calculate linearly constrained steps heavily relies on good scaling of input problems. IMPORTANT: solvers created with minlmcreatefgh() do not support linear constraints. NOTE: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations. NOTE: general linear constraints add significant overhead to solution process. Although solver performs roughly the same number of iterations (when compared with similar box-only constrained problem), each iteration now involves solution of linearly constrained QP subproblem, which requires ~3-5 times more Cholesky decompositions. Thus, if you can reformulate your problem in such a way that it has only box constraints, it may be beneficial to do so. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetlc(const_cast<alglib_impl::minlmstate*>(state.c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function sets general linear constraints for LM optimizer Linear constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another minlmsetlc() call. INPUT PARAMETERS: State - structure stores algorithm state C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT IMPORTANT: if you have linear constraints, it is strongly recommended to set scale of variables with minlmsetscale(). QP solver which is used to calculate linearly constrained steps heavily relies on good scaling of input problems. IMPORTANT: solvers created with minlmcreatefgh() do not support linear constraints.
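EXAMPLE (a minimal sketch; state and the scale vector s are assumed to have
been prepared as shown for the preceding functions):

    // two constraints on (x0,x1):  x0+x1 <= 1  (CT=-1)  and  x0-x1 = 0  (CT=0);
    // each row of C is [coefficients..., right part]
    real_2d_array c = "[[1,1,1],[1,-1,0]]";
    integer_1d_array ct = "[-1,0]";
    minlmsetscale(state, s);     // strongly recommended with linear constraints
    minlmsetlc(state, c, ct);    // K is deduced from the number of rows of C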
NOTE: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations. NOTE: general linear constraints add significant overhead to solution process. Although solver performs roughly the same number of iterations (when compared with similar box-only constrained problem), each iteration now involves solution of linearly constrained QP subproblem, which requires ~3-5 times more Cholesky decompositions. Thus, if you can reformulate your problem in such a way that it has only box constraints, it may be beneficial to do so. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (c.rows()!=ct.length())) _ALGLIB_CPP_EXCEPTION("Error while calling 'minlmsetlc': looks like one of arguments has wrong size"); k = c.rows(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetlc(const_cast<alglib_impl::minlmstate*>(state.c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.c_ptr()), k, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function is used to change acceleration settings You can choose between two acceleration strategies: * AccType=0, no acceleration. * AccType=1, secant updates are used to update quadratic model after each iteration. After fixed number of iterations (or after model breakdown) we recalculate quadratic model using analytic Jacobian or finite differences. Number of secant-based iterations depends on optimization settings: about 3 iterations - when we have analytic Jacobian, up to 2*N iterations - when we use finite differences to calculate Jacobian. AccType=1 is recommended when Jacobian calculation cost is prohibitively high (several Mx1 function vector calculations followed by several NxN Cholesky factorizations are faster than calculation of one M*N Jacobian). It should also be used when we have no Jacobian, because finite difference approximation takes too much time to compute. Table below lists optimization protocols (XYZ protocol corresponds to MinLMCreateXYZ) and acceleration types they support (and use by default).

ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
protocol    0    1    comment
V           +    +
VJ          +    +
FGH         +

DEFAULT VALUES:
protocol    0    1    comment
V                x    without acceleration it is so slooooooooow
VJ               x
FGH         x

NOTE: this function should be called before optimization. Attempt to call it during algorithm iterations may result in unexpected behavior. NOTE: attempt to call this function with unsupported protocol/acceleration combination will result in exception being thrown.
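EXAMPLE (a minimal sketch; per the tables above AccType=1 is already the
default for the V protocol, so the explicit call below is shown only to
illustrate the API):

    minlmcreatev(2, x, 0.0001, state);   // V protocol, Jacobian by finite differences
    minlmsetacctype(state, 1);           // secant-based acceleration of model updates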
-- ALGLIB -- Copyright 14.10.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetacctype(const minlmstate &state, const ae_int_t acctype, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmsetacctype(const_cast(state.c_ptr()), acctype, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ bool minlmiteration(const minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return 0; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); ae_bool result = alglib_impl::minlmiteration(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return *(reinterpret_cast(&result)); } void minlmoptimize(minlmstate &state, void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(fvec!=NULL, "ALGLIB: error in 'minlmoptimize()' (fvec is NULL)", &_alglib_env_state); while( alglib_impl::minlmiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfi ) { fvec(state.x, state.fi, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minlmoptimize(minlmstate &state, void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if 
!defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(fvec!=NULL, "ALGLIB: error in 'minlmoptimize()' (fvec is NULL)", &_alglib_env_state); alglib_impl::ae_assert(jac!=NULL, "ALGLIB: error in 'minlmoptimize()' (jac is NULL)", &_alglib_env_state); while( alglib_impl::minlmiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needfi ) { fvec(state.x, state.fi, ptr); continue; } if( state.needfij ) { jac(state.x, state.fi, state.j, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minlmoptimize(minlmstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minlmoptimize()' (func is NULL)", &_alglib_env_state); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minlmoptimize()' (grad is NULL)", &_alglib_env_state); alglib_impl::ae_assert(hess!=NULL, "ALGLIB: error in 'minlmoptimize()' (hess is NULL)", &_alglib_env_state); while( alglib_impl::minlmiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.needfgh ) { hess(state.x, state.f, state.g, state.h, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minlmoptimize(minlmstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); 
if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minlmoptimize()' (func is NULL)", &_alglib_env_state); alglib_impl::ae_assert(jac!=NULL, "ALGLIB: error in 'minlmoptimize()' (jac is NULL)", &_alglib_env_state); while( alglib_impl::minlmiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.needfij ) { jac(state.x, state.fi, state.j, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } void minlmoptimize(minlmstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr), void *ptr, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::ae_assert(func!=NULL, "ALGLIB: error in 'minlmoptimize()' (func is NULL)", &_alglib_env_state); alglib_impl::ae_assert(grad!=NULL, "ALGLIB: error in 'minlmoptimize()' (grad is NULL)", &_alglib_env_state); alglib_impl::ae_assert(jac!=NULL, "ALGLIB: error in 'minlmoptimize()' (jac is NULL)", &_alglib_env_state); while( alglib_impl::minlmiteration(state.c_ptr(), &_alglib_env_state) ) { _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN if( state.needf ) { func(state.x, state.f, ptr); continue; } if( state.needfg ) { grad(state.x, state.f, state.g, ptr); continue; } if( state.needfij ) { jac(state.x, state.fi, state.j, ptr); continue; } if( state.xupdated ) { if( rep!=NULL ) rep(state.x, state.f, ptr); continue; } goto lbl_no_callback; _ALGLIB_CALLBACK_EXCEPTION_GUARD_END lbl_no_callback: alglib_impl::ae_assert(ae_false, "ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)", &_alglib_env_state); } alglib_impl::ae_state_clear(&_alglib_env_state); } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic Jacobian. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function vector at the initial point (note: future versions may also perform check at the final point) and compares numerical Jacobian with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both Jacobians, and specific components highlighted as suspicious by the OptGuard. The OptGuard report can be retrieved with minlmoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. 
In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minlmsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minlmoptguardgradient(const minlmstate &state, const double teststep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmoptguardgradient(const_cast(state.c_ptr()), teststep, &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over. OptGuard checks analytic Jacobian against reference value obtained by numerical differentiation with user-specified step. NOTE: other optimizers perform additional OptGuard checks for things like C0/C1-continuity violations. However, LM optimizer can check only for incorrect Jacobian. The reason is that unlike line search methods LM optimizer does not perform extensive evaluations along the line. Thus, we simply do not have enough data to catch C0/C1-violations. This check is activated with minlmoptguardgradient() function. 
Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradfidx for specific function (Jacobian row) suspected * rep.badgradvidx for specific variable (Jacobian column) suspected * rep.badgradxbase, a point where gradient/Jacobian is tested * rep.badgraduser, user-provided gradient/Jacobian * rep.badgradnum, reference gradient/Jacobian obtained via numerical differentiation INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - OptGuard report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlmoptguardresults(const minlmstate &state, optguardreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmoptguardresults(const_cast(state.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Levenberg-Marquardt algorithm results NOTE: if you activated OptGuard integrity checking functionality and want to get OptGuard report, it can be retrieved with the help of minlmoptguardresults() function. INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report; includes termination codes and additional information. Termination codes are listed below, see comments for this structure for more info. Termination code is stored in rep.terminationtype field: * -8 optimizer detected NAN/INF values either in the function itself, or in its Jacobian * -3 constraints are inconsistent * 2 relative step is no more than EpsX. * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible * 8 terminated by user who called minlmrequesttermination(). X contains point which was "current accepted" when termination request was submitted. -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmresults(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* Levenberg-Marquardt algorithm results Buffered implementation of MinLMResults(), which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. 
It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmresultsbuf(const_cast(state.c_ptr()), const_cast(x.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine restarts LM algorithm from new point. All optimization parameters are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used for reverse communication previously allocated with MinLMCreateXXX call. X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void minlmrestartfrom(const minlmstate &state, const real_1d_array &x, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmrestartfrom(const_cast(state.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. 
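EXAMPLE (a minimal sketch; the callback name and the stopping threshold are
illustrative, and the state is reached from the callback through the ptr
argument, which is just one possible arrangement; demo_fvec/demo_jac are the
callbacks sketched earlier):

    void demo_rep_terminate(const real_1d_array &x, double func, void *ptr)
    {
        minlmstate *s = (minlmstate*)ptr;
        if( func<1.0e-3 )
            minlmrequesttermination(*s);   // optimizer will stop with code 8
    }
    ...
    minlmsetxrep(state, true);             // rep() callback must be enabled
    minlmoptimize(state, demo_fvec, demo_jac, demo_rep_terminate, &state);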
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minlmrequesttermination(const minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmrequesttermination(const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This is obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatevgj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This is obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatevgj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This is obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). 
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefgj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This is obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefgj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif /************************************************************************* This function is considered obsolete since ALGLIB 3.1.0 and is present for backward compatibility only. We recommend to use MinLMCreateVJ, which provides similar, but more consistent and feature-rich interface. -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) { #if !defined(AE_NO_EXCEPTIONS) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); #else _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; #endif } ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } /************************************************************************* This function is considered obsolete since ALGLIB 3.1.0 and is present for backward compatibility only. We recommend to use MinLMCreateVJ, which provides similar, but more consistent and feature-rich interface. 
-- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ #if !defined(AE_NO_EXCEPTIONS) void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams) { jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); if( setjmp(_break_jump) ) _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); ae_state_set_break_jump(&_alglib_env_state, &_break_jump); if( _xparams.flags!=0x0 ) ae_state_set_flags(&_alglib_env_state, _xparams.flags); alglib_impl::minlmcreatefj(n, m, const_cast(x.c_ptr()), const_cast(state.c_ptr()), &_alglib_env_state); alglib_impl::ae_state_clear(&_alglib_env_state); return; } #endif #endif } ///////////////////////////////////////////////////////////////////////// // // THIS SECTION CONTAINS IMPLEMENTATION OF COMPUTATIONAL CORE // ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD) static ae_int_t cqmodels_newtonrefinementits = 3; static ae_bool cqmodels_cqmrebuild(convexquadraticmodel* s, ae_state *_state); static void cqmodels_cqmsolveea(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Real */ ae_vector* tmp, ae_state *_state); #endif #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD) static double optserv_ognoiselevelf = 1.0E2*ae_machineepsilon; static double optserv_ognoiselevelg = 1.0E4*ae_machineepsilon; static double optserv_ogminrating0 = 50.0; static double optserv_ogminrating1 = 50.0; static double optserv_feasibilityerror(/* Real */ ae_matrix* ce, /* Real */ ae_vector* x, ae_int_t nmain, ae_int_t nslack, ae_int_t k, /* Real */ ae_vector* tmp0, ae_state *_state); static void optserv_feasibilityerrorgrad(/* Real */ ae_matrix* ce, /* Real */ ae_vector* x, ae_int_t nmain, ae_int_t nslack, ae_int_t k, double* err, /* Real */ ae_vector* grad, /* Real */ ae_vector* tmp0, ae_state *_state); static void optserv_testc0continuity(double f0, double f1, double f2, double f3, double noise0, double noise1, double noise2, double noise3, double delta0, double delta1, double delta2, ae_bool applyspecialcorrection, double* rating, double* lipschitz, ae_state *_state); static void optserv_c1continuitytest0(smoothnessmonitor* monitor, ae_int_t funcidx, ae_int_t stpidx, ae_int_t sortedcnt, ae_state *_state); static void optserv_c1continuitytest1(smoothnessmonitor* monitor, ae_int_t funcidx, ae_int_t stpidx, ae_int_t sortedcnt, ae_state *_state); #endif #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD) static void snnls_funcgradu(snnlssolver* s, /* Real */ ae_vector* x, /* Real */ ae_vector* r, /* Real */ ae_vector* g, double* f, ae_state *_state); static void snnls_func(snnlssolver* s, /* Real */ ae_vector* x, double* f, ae_state *_state); static void snnls_trdprepare(snnlssolver* s, /* Real */ ae_vector* x, /* Real */ ae_vector* diag, double lambdav, /* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, /* Real */ ae_vector* tmp2, /* Real */ ae_matrix* tmplq, ae_state *_state); static void snnls_trdsolve(/* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, ae_int_t ns, ae_int_t nd, /* Real */ ae_vector* d, ae_state *_state); static void snnls_trdfixvariable(/* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, ae_int_t ns, ae_int_t nd, 
ae_int_t idx, /* Real */ ae_vector* tmp, ae_state *_state); #endif #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD) static ae_int_t sactivesets_maxbasisage = 5; static double sactivesets_maxbasisdecay = 0.01; static double sactivesets_minnormseparation = 0.25; static void sactivesets_constraineddescent(sactiveset* state, /* Real */ ae_vector* g, /* Real */ ae_vector* h, /* Real */ ae_matrix* ha, ae_bool normalize, /* Real */ ae_vector* d, ae_state *_state); static void sactivesets_reactivateconstraints(sactiveset* state, /* Real */ ae_vector* gc, /* Real */ ae_vector* h, ae_state *_state); #endif #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD) static ae_int_t qqpsolver_quickqprestartcg = 50; static double qqpsolver_regz = 1.0E-9; static double qqpsolver_projectedtargetfunction(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* d, double stp, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, ae_state *_state); static void qqpsolver_targetgradient(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* g, ae_state *_state); static void qqpsolver_quadraticmodel(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* d, /* Real */ ae_vector* g, double* d1, ae_int_t* d1est, double* d2, ae_int_t* d2est, /* Real */ ae_vector* tmp0, ae_state *_state); static void qqpsolver_findbeststepandmove(qqpbuffers* sstate, sactiveset* sas, /* Real */ ae_vector* d, double stp, ae_bool needact, ae_int_t cidx, double cval, /* Real */ ae_vector* addsteps, ae_int_t addstepscnt, /* Boolean */ ae_vector* activated, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, ae_state *_state); static ae_bool qqpsolver_cnewtonbuild(qqpbuffers* sstate, ae_int_t sparsesolver, ae_int_t* ncholesky, ae_state *_state); static ae_bool qqpsolver_cnewtonupdate(qqpbuffers* sstate, qqpsettings* settings, ae_int_t* ncupdates, ae_state *_state); static ae_bool qqpsolver_cnewtonstep(qqpbuffers* sstate, qqpsettings* settings, /* Real */ ae_vector* gc, ae_state *_state); #endif #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD) static ae_int_t vipmsolver_maxipmits = 50; static double vipmsolver_initslackval = 100.0; static double vipmsolver_affinesteplengthdecay = 0.99; static double vipmsolver_steplengthdecay = 0.95; static double vipmsolver_stagnationdelta = 0.99999; static double vipmsolver_primalinfeasible1 = 1.0E-6; static double vipmsolver_dualinfeasible1 = 1.0E-4; static double vipmsolver_bigy = 1.0E10; static ae_int_t vipmsolver_itersfortoostringentcond = 25; static ae_int_t vipmsolver_minitersbeforestagnation = 5; static ae_int_t vipmsolver_primalstagnationlen = 5; static ae_int_t vipmsolver_dualstagnationlen = 7; static void vipmsolver_varsinitbyzero(vipmvars* vstate, ae_int_t n, ae_int_t m, ae_state *_state); static void vipmsolver_varsinitfrom(vipmvars* vstate, vipmvars* vsrc, ae_state *_state); static void vipmsolver_varsaddstep(vipmvars* vstate, vipmvars* vdir, double stpp, double stpd, ae_state *_state); static double vipmsolver_varscomputecomplementaritygap(vipmvars* vstate, ae_state *_state); static double vipmsolver_varscomputemu(vipmvars* vstate, ae_state *_state); static void vipmsolver_vipminit(vipmstate* state, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, ae_int_t nmain, ae_int_t ftype, ae_state *_state); static double vipmsolver_nrminf(/* Real */ ae_vector* x, ae_int_t n, ae_state *_state); static void 
vipmsolver_vipminitialpoint(vipmstate* state, ae_state *_state); static double vipmsolver_vipmtarget(vipmstate* state, /* Real */ ae_vector* x, ae_state *_state); static void vipmsolver_vipmmultiply(vipmstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* y, /* Real */ ae_vector* hx, /* Real */ ae_vector* ax, /* Real */ ae_vector* aty, ae_state *_state); static void vipmsolver_vipmpowerup(vipmstate* state, ae_state *_state); static void vipmsolver_vipmfactorize(vipmstate* state, double alpha0, /* Real */ ae_vector* d, double beta0, /* Real */ ae_vector* e, double alpha1, double beta1, ae_state *_state); static void vipmsolver_vipmsolve(vipmstate* state, /* Real */ ae_vector* deltaxy, ae_state *_state); static void vipmsolver_vipmprecomputenewtonfactorization(vipmstate* state, vipmvars* v0, ae_state *_state); static void vipmsolver_vipmcomputestepdirection(vipmstate* state, vipmvars* v0, double mu, vipmvars* vd, ae_state *_state); static void vipmsolver_vipmcomputesteplength(vipmstate* state, vipmvars* v0, vipmvars* vs, double stepdecay, double* alphap, double* alphad, ae_state *_state); static void vipmsolver_vipmevaluateprogress(vipmstate* state, ae_bool dotrace, ae_bool dodetailedtrace, double mu, double muaff, double sigma, double alphap, double alphad, ae_state *_state); #endif #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD) static double nlcsqp_sqpdeltadecrease = 0.20; static double nlcsqp_sqpdeltaincrease = 0.80; static double nlcsqp_maxtrustraddecay = 0.1; static double nlcsqp_maxtrustradgrowth = 1.333; static double nlcsqp_bigc = 500.0; static double nlcsqp_meritfunctionbase = 0.0; static double nlcsqp_meritfunctiongain = 2.0; static double nlcsqp_augmentationfactor = 10.0; static double nlcsqp_inittrustrad = 0.1; static double nlcsqp_stagnationepsf = 1.0E-12; static ae_int_t nlcsqp_fstagnationlimit = 20; static double nlcsqp_sqpbigscale = 5.0; static double nlcsqp_sqpsmallscale = 0.2; static void nlcsqp_initqpsubsolver(minsqpstate* sstate, minsqpsubsolver* subsolver, ae_state *_state); static void nlcsqp_qpsubsolversetalgoipm(minsqpsubsolver* subsolver, ae_state *_state); static void nlcsqp_qpsubsolversetalgofastactiveset(minsqpsubsolver* subsolver, /* Real */ ae_vector* lagmult, ae_state *_state); static ae_bool nlcsqp_qpsubproblemupdatehessian(minsqpstate* sstate, minsqpsubsolver* subsolver, /* Real */ ae_vector* x0, /* Real */ ae_vector* g0, /* Real */ ae_vector* x1, /* Real */ ae_vector* g1, ae_state *_state); static void nlcsqp_fassolve(minsqpsubsolver* subsolver, /* Real */ ae_vector* d0, /* Real */ ae_matrix* h, ae_int_t nq, /* Real */ ae_vector* b, ae_int_t n, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, sparsematrix* a, ae_int_t m, /* Real */ ae_vector* al, /* Real */ ae_vector* au, double trustrad, ae_int_t* terminationtype, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state); static ae_bool nlcsqp_qpsubproblemsolve(minsqpstate* state, minsqpsubsolver* subsolver, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state); static void nlcsqp_meritphaseinit(minsqpmeritphasestate* meritstate, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, ae_state *_state); static ae_bool nlcsqp_meritphaseiteration(minsqpstate* state, minsqpmeritphasestate* meritstate, smoothnessmonitor* smonitor, ae_bool userterminationneeded, ae_state 
*_state); static void nlcsqp_meritphaseresults(minsqpmeritphasestate* meritstate, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, ae_int_t* status, ae_state *_state); static void nlcsqp_sqpsendx(minsqpstate* state, /* Real */ ae_vector* xs, ae_state *_state); static ae_bool nlcsqp_sqpretrievefij(minsqpstate* state, /* Real */ ae_vector* fis, /* Real */ ae_matrix* js, ae_state *_state); static void nlcsqp_sqpcopystate(minsqpstate* state, /* Real */ ae_vector* x0, /* Real */ ae_vector* fi0, /* Real */ ae_matrix* j0, /* Real */ ae_vector* x1, /* Real */ ae_vector* fi1, /* Real */ ae_matrix* j1, ae_state *_state); static void nlcsqp_lagrangianfg(minsqpstate* state, /* Real */ ae_vector* x, double trustrad, /* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* lagmult, minsqptmplagrangian* tmp, double* f, /* Real */ ae_vector* g, ae_state *_state); static double nlcsqp_meritfunction(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, ae_state *_state); static double nlcsqp_rawlagrangian(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, ae_state *_state); static void nlcsqp_meritfunctionandrawlagrangian(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, double* meritf, double* rawlag, ae_state *_state); #endif #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD) static double minlbfgs_gtol = 0.4; static void minlbfgs_clearrequestfields(minlbfgsstate* state, ae_state *_state); #endif #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD) static double qpdenseaulsolver_evictionlevel = -0.01; static double qpdenseaulsolver_expansionratio = 0.20; static void qpdenseaulsolver_generateexmodel(/* Real */ ae_matrix* sclsfta, /* Real */ ae_vector* sclsftb, ae_int_t nmain, /* Real */ ae_vector* sclsftbndl, /* Boolean */ ae_vector* sclsfthasbndl, /* Real */ ae_vector* sclsftbndu, /* Boolean */ ae_vector* sclsfthasbndu, /* Real */ ae_matrix* sclsftcleic, ae_int_t sclsftnec, ae_int_t sclsftnic, /* Real */ ae_vector* nulc, double rho, /* Real */ ae_matrix* exa, /* Real */ ae_vector* exb, /* Real */ ae_vector* exbndl, /* Real */ ae_vector* exbndu, /* Real */ ae_matrix* tmp2, ae_state *_state); static void qpdenseaulsolver_generateexinitialpoint(/* Real */ ae_vector* sclsftxc, ae_int_t nmain, ae_int_t nslack, /* Real */ ae_vector* exxc, ae_state *_state); static void qpdenseaulsolver_updatelagrangemultipliers(/* Real */ ae_matrix* sclsfta, /* Real */ ae_vector* sclsftb, ae_int_t nmain, /* Real */ ae_vector* sclsftbndl, /* Boolean */ ae_vector* sclsfthasbndl, /* Real */ ae_vector* sclsftbndu, /* Boolean */ ae_vector* sclsfthasbndu, /* Real */ ae_matrix* sclsftcleic, ae_int_t sclsftnec, ae_int_t sclsftnic, /* Real */ ae_vector* exxc, /* Real */ ae_vector* nulcest, qpdenseaulbuffers* buffers, ae_state *_state); static void qpdenseaulsolver_scaleshiftoriginalproblem(convexquadraticmodel* a, sparsematrix* sparsea, ae_int_t akind, ae_bool sparseaupper, /* Real */ ae_vector* b, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t nmain, /* Real */ ae_matrix* cleic, ae_int_t dnec, ae_int_t dnic, sparsematrix* scleic, ae_int_t snec, ae_int_t snic, ae_bool renormlc, qpdenseaulbuffers* state, /* Real */ ae_vector* xs, ae_state 
*_state); static double qpdenseaulsolver_normalizequadraticterm(/* Real */ ae_matrix* a, /* Real */ ae_vector* b, ae_int_t n, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, ae_bool usecleic, /* Real */ ae_matrix* tmp2, ae_state *_state); static void qpdenseaulsolver_selectinitialworkingset(/* Real */ ae_matrix* a, ae_int_t nmain, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, /* Real */ ae_vector* tmp0, /* Real */ ae_matrix* tmp2, ae_int_t* nicwork, ae_bool* allowwseviction, ae_state *_state); #endif #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD) static double minbleic_gtol = 0.4; static double minbleic_maxnonmonotoniclen = 1.0E-7; static double minbleic_nmstol = 1.0E2; static double minbleic_initialdecay = 0.5; static double minbleic_mindecay = 0.1; static double minbleic_decaycorrection = 0.8; static double minbleic_penaltyfactor = 100; static void minbleic_clearrequestfields(minbleicstate* state, ae_state *_state); static void minbleic_minbleicinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minbleicstate* state, ae_state *_state); static void minbleic_updateestimateofgoodstep(double* estimate, double newstep, ae_state *_state); static double minbleic_feasibilityerror(/* Real */ ae_vector* x, /* Real */ ae_vector* s, ae_int_t n, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, ae_state *_state); #endif #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD) #endif #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD) static ae_int_t reviseddualsimplex_maxforcedrestarts = 1; static ae_int_t reviseddualsimplex_safetrfage = 5; static ae_int_t reviseddualsimplex_defaultmaxtrfage = 50; static double reviseddualsimplex_minbeta = 1.0E-4; static double reviseddualsimplex_maxudecay = 0.001; static double reviseddualsimplex_shiftlen = 1.0E-12; static double reviseddualsimplex_dtol = 1.0E-6; static double reviseddualsimplex_xtol = 1.0E-9; static double reviseddualsimplex_alphatrigger = 1.0E8*ae_machineepsilon; static double reviseddualsimplex_alphatrigger2 = 0.001; static ae_int_t reviseddualsimplex_ssinvalid = 0; static ae_int_t reviseddualsimplex_ssvalidxn = 1; static ae_int_t reviseddualsimplex_ssvalid = 2; static ae_int_t reviseddualsimplex_ccfixed = 0; static ae_int_t reviseddualsimplex_cclower = 1; static ae_int_t reviseddualsimplex_ccupper = 2; static ae_int_t reviseddualsimplex_ccrange = 3; static ae_int_t reviseddualsimplex_ccfree = 4; static ae_int_t reviseddualsimplex_ccinfeasible = 5; static void reviseddualsimplex_subprobleminit(ae_int_t n, dualsimplexsubproblem* s, ae_state *_state); static void reviseddualsimplex_subprobleminitphase1(dualsimplexsubproblem* s0, dualsimplexbasis* basis, dualsimplexsubproblem* s1, ae_state *_state); static void reviseddualsimplex_subprobleminitphase3(dualsimplexsubproblem* s0, dualsimplexsubproblem* s1, ae_state *_state); static void reviseddualsimplex_subprobleminferinitialxn(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state); static void reviseddualsimplex_subproblemhandlexnupdate(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state); static double reviseddualsimplex_initialdualfeasibilitycorrection(dualsimplexstate* state, dualsimplexsubproblem* s, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_shifting(dualsimplexstate* state, dualsimplexsubproblem* s, /* Real */ ae_vector* alphar, double delta, ae_int_t q, double* thetad, 
dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_pricingstep(dualsimplexstate* state, dualsimplexsubproblem* s, ae_bool phase1pricing, ae_int_t* p, ae_int_t* r, double* delta, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_ratiotest(dualsimplexstate* state, dualsimplexsubproblem* s, /* Real */ ae_vector* alphar, double delta, ae_int_t p, ae_int_t* q, double* thetad, /* Integer */ ae_vector* possibleflips, ae_int_t* possibleflipscnt, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_updatestep(dualsimplexstate* state, dualsimplexsubproblem* s, ae_int_t p, ae_int_t q, ae_int_t r, double delta, double alphapiv, double thetap, double thetad, /* Real */ ae_vector* alphaq, /* Real */ ae_vector* alphaqim, /* Real */ ae_vector* alphar, /* Real */ ae_vector* tau, /* Integer */ ae_vector* possibleflips, ae_int_t possibleflipscnt, dualsimplexsettings* settings, ae_state *_state); static ae_bool reviseddualsimplex_refactorizationrequired(dualsimplexstate* state, dualsimplexsubproblem* s, ae_int_t q, ae_int_t r, ae_state *_state); static void reviseddualsimplex_solvesubproblemdual(dualsimplexstate* state, dualsimplexsubproblem* s, ae_bool isphase1, dualsimplexsettings* settings, ae_int_t* info, ae_state *_state); static void reviseddualsimplex_solvesubproblemprimal(dualsimplexstate* state, dualsimplexsubproblem* s, dualsimplexsettings* settings, ae_int_t* info, ae_state *_state); static void reviseddualsimplex_invokephase1(dualsimplexstate* state, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_solveboxonly(dualsimplexstate* state, ae_state *_state); static void reviseddualsimplex_setzeroxystats(dualsimplexstate* state, ae_state *_state); static void reviseddualsimplex_basisinit(ae_int_t ns, ae_int_t m, dualsimplexbasis* s, ae_state *_state); static ae_bool reviseddualsimplex_basistryresize(dualsimplexbasis* s, ae_int_t newm, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state); static double reviseddualsimplex_basisminimumdiagonalelement(dualsimplexbasis* s, ae_state *_state); static void reviseddualsimplex_basisexportto(dualsimplexbasis* s0, dualsimplexbasis* s1, ae_state *_state); static ae_bool reviseddualsimplex_basistryimportfrom(dualsimplexbasis* s0, dualsimplexbasis* s1, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_basisfreshtrf(dualsimplexbasis* s, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state); static double reviseddualsimplex_basisfreshtrfunsafe(dualsimplexbasis* s, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_basisrequestweights(dualsimplexbasis* s, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_basisupdatetrf(dualsimplexbasis* s, sparsematrix* at, ae_int_t p, ae_int_t q, /* Real */ ae_vector* alphaq, /* Real */ ae_vector* alphaqim, ae_int_t r, /* Real */ ae_vector* tau, dualsimplexsettings* settings, ae_state *_state); static void reviseddualsimplex_basissolve(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ ae_vector* x, /* Real */ ae_vector* tmpx, ae_state *_state); static void reviseddualsimplex_basissolvex(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ ae_vector* x, /* Real */ ae_vector* xim, ae_bool needintermediate, /* Real */ ae_vector* tx, ae_state *_state); static void reviseddualsimplex_basissolvet(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ 
ae_vector* x, /* Real */ ae_vector* tx, ae_state *_state); static void reviseddualsimplex_computeanxn(dualsimplexstate* state, dualsimplexsubproblem* subproblem, /* Real */ ae_vector* x, /* Real */ ae_vector* y, ae_state *_state); static void reviseddualsimplex_computeantv(dualsimplexstate* state, /* Real */ ae_vector* y, /* Real */ ae_vector* r, ae_state *_state); static ae_bool reviseddualsimplex_hasbndl(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state); static ae_bool reviseddualsimplex_hasbndu(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state); static ae_bool reviseddualsimplex_isfree(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state); static void reviseddualsimplex_downgradestate(dualsimplexsubproblem* subproblem, ae_int_t s, ae_state *_state); static double reviseddualsimplex_dualfeasibilityerror(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state); static ae_bool reviseddualsimplex_isdualfeasible(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state); static void reviseddualsimplex_pivottobwd(/* Integer */ ae_vector* p, ae_int_t m, /* Integer */ ae_vector* bwd, ae_state *_state); static void reviseddualsimplex_inversecyclicpermutation(/* Integer */ ae_vector* bwd, ae_int_t m, ae_int_t d, /* Integer */ ae_vector* tmpi, ae_state *_state); static void reviseddualsimplex_offloadbasiccomponents(dualsimplexsubproblem* s, dualsimplexbasis* basis, ae_state *_state); static void reviseddualsimplex_recombinebasicnonbasicx(dualsimplexsubproblem* s, dualsimplexbasis* basis, ae_state *_state); static void reviseddualsimplex_unscaleandenforce(/* Real */ ae_vector* repx, /* Real */ ae_vector* repy, /* Real */ ae_vector* repdx, dualsimplexstate* s, ae_state *_state); #endif #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD) static ae_int_t minlp_alllogicalsbasis = 0; static void minlp_clearreportfields(minlpstate* state, ae_state *_state); #endif #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD) static double nlcslp_slpstpclosetozero = 0.001; static double nlcslp_slpdeltadecrease = 0.20; static double nlcslp_slpdeltaincrease = 0.80; static double nlcslp_slpstpclosetoone = 0.95; static double nlcslp_maxtrustraddecay = 0.1; static double nlcslp_maxtrustradgrowth = 1.333; static double nlcslp_slpgtol = 0.4; static double nlcslp_bigc = 500.0; static double nlcslp_bfgstol = 1.0E-5; static double nlcslp_meritfunctionbase = 0.0; static double nlcslp_meritfunctiongain = 2.0; static double nlcslp_inequalitydampingfactor = 10.0; static double nlcslp_augmentationfactor = 10.0; static double nlcslp_inittrustrad = 0.1; static double nlcslp_stagnationepsf = 1.0E-12; static ae_int_t nlcslp_lpfailureslimit = 20; static ae_int_t nlcslp_fstagnationlimit = 20; static ae_int_t nlcslp_nondescentlimit = 99999; static ae_int_t nlcslp_nonmonotonicphase2limit = 5; static double nlcslp_slpbigscale = 5.0; static double nlcslp_slpsmallscale = 0.2; static void nlcslp_initlpsubsolver(minslpstate* sstate, minslpsubsolver* subsolver, ae_int_t hessiantype, ae_state *_state); static void nlcslp_lpsubproblemrestart(minslpstate* sstate, minslpsubsolver* subsolver, ae_state *_state); static void nlcslp_lpsubproblemupdatehessian(minslpstate* sstate, minslpsubsolver* subsolver, /* Real */ ae_vector* x0, /* Real */ ae_vector* g0, /* Real */ ae_vector* x1, /* Real */ ae_vector* g1, ae_state *_state); static ae_bool nlcslp_lpsubproblemsolve(minslpstate* state, minslpsubsolver* subsolver, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ 
ae_matrix* jac, ae_int_t innerk, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state); static void nlcslp_lpsubproblemappendconjugacyconstraint(minslpstate* state, minslpsubsolver* subsolver, /* Real */ ae_vector* d, ae_state *_state); static void nlcslp_phase13init(minslpphase13state* state13, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, ae_bool usecorrection, ae_state *_state); static ae_bool nlcslp_phase13iteration(minslpstate* state, minslpphase13state* state13, smoothnessmonitor* smonitor, ae_bool userterminationneeded, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, ae_int_t* status, double* stp, ae_state *_state); static void nlcslp_phase2init(minslpphase2state* state2, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, /* Real */ ae_vector* meritlagmult, ae_state *_state); static ae_bool nlcslp_phase2iteration(minslpstate* state, minslpphase2state* state2, smoothnessmonitor* smonitor, ae_bool userterminationneeded, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, double* gammamax, ae_int_t* status, ae_state *_state); static void nlcslp_slpsendx(minslpstate* state, /* Real */ ae_vector* xs, ae_state *_state); static ae_bool nlcslp_slpretrievefij(minslpstate* state, /* Real */ ae_vector* fis, /* Real */ ae_matrix* js, ae_state *_state); static void nlcslp_slpcopystate(minslpstate* state, /* Real */ ae_vector* x0, /* Real */ ae_vector* fi0, /* Real */ ae_matrix* j0, /* Real */ ae_vector* x1, /* Real */ ae_vector* fi1, /* Real */ ae_matrix* j1, ae_state *_state); static void nlcslp_lagrangianfg(minslpstate* state, /* Real */ ae_vector* x, double trustrad, /* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* lagmult, minslptmplagrangian* tmp, double* f, /* Real */ ae_vector* g, double* lcerr, ae_int_t* lcidx, double* nlcerr, ae_int_t* nlcidx, ae_state *_state); static double nlcslp_meritfunction(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, ae_state *_state); static double nlcslp_rawlagrangian(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, ae_state *_state); static void nlcslp_meritfunctionandrawlagrangian(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, double* meritf, double* rawlag, ae_state *_state); #endif #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD) static double minnlc_aulmaxgrowth = 10.0; static double minnlc_maxlagmult = 1.0E7; static ae_int_t minnlc_lbfgsfactor = 10; static double minnlc_hessesttol = 1.0E-6; static double minnlc_initgamma = 1.0E-6; static double minnlc_regprec = 1.0E-6; static void minnlc_clearrequestfields(minnlcstate* state, ae_state *_state); static void minnlc_minnlcinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnlcstate* state, ae_state *_state); static void minnlc_clearpreconditioner(minlbfgsstate* auloptimizer, ae_state *_state); static void minnlc_updatepreconditioner(ae_int_t prectype, ae_int_t updatefreq, ae_int_t* preccounter, minlbfgsstate* auloptimizer, /* Real */ ae_vector* x, double rho, double gammak, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* hasbndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* nubc, /* Real */ 
ae_matrix* cleic, /* Real */ ae_vector* nulc, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, /* Real */ ae_vector* nunlc, /* Real */ ae_vector* bufd, /* Real */ ae_vector* bufc, /* Real */ ae_matrix* bufw, /* Real */ ae_matrix* bufz, /* Real */ ae_vector* tmp0, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t ng, ae_int_t nh, ae_state *_state); static void minnlc_penaltybc(/* Real */ ae_vector* x, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* hasbndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* nubc, ae_int_t n, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state); static void minnlc_penaltylc(/* Real */ ae_vector* x, /* Real */ ae_matrix* cleic, /* Real */ ae_vector* nulc, ae_int_t n, ae_int_t nec, ae_int_t nic, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state); static void minnlc_penaltynlc(/* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* nunlc, ae_int_t n, ae_int_t ng, ae_int_t nh, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state); static ae_bool minnlc_auliteration(minnlcstate* state, smoothnessmonitor* smonitor, ae_state *_state); static void minnlc_unscale(minnlcstate* state, /* Real */ ae_vector* xs, /* Real */ ae_vector* scaledbndl, /* Real */ ae_vector* scaledbndu, /* Real */ ae_vector* xu, ae_state *_state); #endif #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD) static double minbc_gtol = 0.4; static double minbc_maxnonmonotoniclen = 1.0E-5; static double minbc_initialdecay = 0.5; static double minbc_mindecay = 0.1; static double minbc_decaycorrection = 0.8; static void minbc_clearrequestfields(minbcstate* state, ae_state *_state); static void minbc_minbcinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minbcstate* state, ae_state *_state); static void minbc_updateestimateofgoodstep(double* estimate, double newstep, ae_state *_state); #endif #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD) static void minns_clearrequestfields(minnsstate* state, ae_state *_state); static void minns_minnsinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnsstate* state, ae_state *_state); static ae_bool minns_agsiteration(minnsstate* state, ae_state *_state); static void minns_generatemeritfunction(minnsstate* state, ae_int_t sampleidx, ae_state *_state); static void minns_unscalepointbc(minnsstate* state, /* Real */ ae_vector* x, ae_state *_state); static void minns_solveqp(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, ae_int_t* dbgncholesky, minnsqp* state, ae_state *_state); static void minns_qpcalculategradfunc(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, /* Real */ ae_vector* g, double* f, /* Real */ ae_vector* tmp, ae_state *_state); static void minns_qpcalculatefunc(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, double* f, /* Real */ ae_vector* tmp, ae_state *_state); static void minns_qpsolveu(/* Real */ ae_matrix* a, ae_int_t n, /* Real */ ae_vector* x, ae_state *_state); static void minns_qpsolveut(/* Real */ ae_matrix* a, ae_int_t n, /* Real */ ae_vector* x, ae_state *_state); #endif #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD) static ae_int_t mincomp_n1 = 2; static ae_int_t mincomp_n2 = 2; 
static double mincomp_stpmin = 1.0E-300; static double mincomp_gtol = 0.3; static double mincomp_gpaftol = 0.0001; static double mincomp_gpadecay = 0.5; static double mincomp_asarho = 0.5; static double mincomp_asaboundedantigradnorm(minasastate* state, ae_state *_state); static double mincomp_asaginorm(minasastate* state, ae_state *_state); static double mincomp_asad1norm(minasastate* state, ae_state *_state); static ae_bool mincomp_asauisempty(minasastate* state, ae_state *_state); static void mincomp_clearrequestfields(minasastate* state, ae_state *_state); #endif #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD) static ae_int_t mincg_rscountdownlen = 10; static double mincg_gtol = 0.3; static void mincg_clearrequestfields(mincgstate* state, ae_state *_state); static void mincg_preconditionedmultiply(mincgstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* work0, /* Real */ ae_vector* work1, ae_state *_state); static double mincg_preconditionedmultiply2(mincgstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* y, /* Real */ ae_vector* work0, /* Real */ ae_vector* work1, ae_state *_state); static void mincg_mincginitinternal(ae_int_t n, double diffstep, mincgstate* state, ae_state *_state); #endif #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD) static double minlm_lambdaup = 2.0; static double minlm_lambdadown = 0.33; static double minlm_suspiciousnu = 16; static ae_int_t minlm_smallmodelage = 3; static ae_int_t minlm_additers = 5; static void minlm_lmprepare(ae_int_t n, ae_int_t m, ae_bool havegrad, minlmstate* state, ae_state *_state); static void minlm_clearrequestfields(minlmstate* state, ae_state *_state); static ae_bool minlm_increaselambda(double* lambdav, double* nu, ae_state *_state); static void minlm_decreaselambda(double* lambdav, double* nu, ae_state *_state); static ae_int_t minlm_checkdecrease(/* Real */ ae_matrix* quadraticmodel, /* Real */ ae_vector* gbase, double fbase, ae_int_t n, /* Real */ ae_vector* deltax, double fnew, double* lambdav, double* nu, ae_state *_state); static ae_bool minlm_minlmstepfinderinit(minlmstepfinder* state, ae_int_t n, ae_int_t m, ae_int_t maxmodelage, ae_bool hasfi, /* Real */ ae_vector* xbase, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, /* Real */ ae_vector* s, double stpmax, double epsx, ae_state *_state); static void minlm_minlmstepfinderstart(minlmstepfinder* state, /* Real */ ae_matrix* quadraticmodel, /* Real */ ae_vector* gbase, double fbase, /* Real */ ae_vector* xbase, /* Real */ ae_vector* fibase, ae_int_t modelage, ae_state *_state); static ae_bool minlm_minlmstepfinderiteration(minlmstepfinder* state, double* lambdav, double* nu, /* Real */ ae_vector* xnew, /* Real */ ae_vector* deltax, ae_bool* deltaxready, /* Real */ ae_vector* deltaf, ae_bool* deltafready, ae_int_t* iflag, double* fnew, ae_int_t* ncholesky, ae_state *_state); #endif #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This subroutine is used to initialize CQM. By default, empty NxN model is generated, with Alpha=Lambda=Theta=0.0 and zero b. Previously allocated buffer variables are reused as much as possible. 
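A rough construction sketch is shown below (allocation of A, B, X via
ae_matrix_set_length/ae_vector_set_length and error handling are omitted;
the concrete values are illustrative only):

    cqminit(n, &s, _state);                  // empty NxN model
    cqmseta(&s, &a, ae_true, 1.0, _state);   // add 0.5*x'*A*x
    cqmsetb(&s, &b, _state);                 // add b'*x
    v = cqmeval(&s, &x, _state);             // model value at x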
-- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state) { ae_int_t i; s->n = n; s->k = 0; s->nfree = n; s->ecakind = -1; s->alpha = 0.0; s->tau = 0.0; s->theta = 0.0; s->ismaintermchanged = ae_true; s->issecondarytermchanged = ae_true; s->islineartermchanged = ae_true; s->isactivesetchanged = ae_true; bvectorsetlengthatleast(&s->activeset, n, _state); rvectorsetlengthatleast(&s->xc, n, _state); rvectorsetlengthatleast(&s->eb, n, _state); rvectorsetlengthatleast(&s->tq1, n, _state); rvectorsetlengthatleast(&s->txc, n, _state); rvectorsetlengthatleast(&s->tb, n, _state); rvectorsetlengthatleast(&s->b, s->n, _state); rvectorsetlengthatleast(&s->tk1, s->n, _state); for(i=0; i<=n-1; i++) { s->activeset.ptr.p_bool[i] = ae_false; s->xc.ptr.p_double[i] = 0.0; s->b.ptr.p_double[i] = 0.0; } } /************************************************************************* This subroutine changes main quadratic term of the model. INPUT PARAMETERS: S - model A - NxN matrix, only upper or lower triangle is referenced IsUpper - True, when matrix is stored in upper triangle Alpha - multiplier; when Alpha=0, A is not referenced at all -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmseta(convexquadraticmodel* s, /* Real */ ae_matrix* a, ae_bool isupper, double alpha, ae_state *_state) { ae_int_t i; ae_int_t j; double v; ae_assert(ae_isfinite(alpha, _state)&&ae_fp_greater_eq(alpha,(double)(0)), "CQMSetA: Alpha<0 or is not finite number", _state); ae_assert(ae_fp_eq(alpha,(double)(0))||isfinitertrmatrix(a, s->n, isupper, _state), "CQMSetA: A is not finite NxN matrix", _state); s->alpha = alpha; if( ae_fp_greater(alpha,(double)(0)) ) { rmatrixsetlengthatleast(&s->a, s->n, s->n, _state); rmatrixsetlengthatleast(&s->ecadense, s->n, s->n, _state); rmatrixsetlengthatleast(&s->tq2dense, s->n, s->n, _state); for(i=0; i<=s->n-1; i++) { for(j=i; j<=s->n-1; j++) { if( isupper ) { v = a->ptr.pp_double[i][j]; } else { v = a->ptr.pp_double[j][i]; } s->a.ptr.pp_double[i][j] = v; s->a.ptr.pp_double[j][i] = v; } } } s->ismaintermchanged = ae_true; } /************************************************************************* This subroutine changes main quadratic term of the model. INPUT PARAMETERS: S - model A - possibly preallocated buffer OUTPUT PARAMETERS: A - NxN matrix, full matrix is returned. Zero matrix is returned if model is empty. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmgeta(convexquadraticmodel* s, /* Real */ ae_matrix* a, ae_state *_state) { ae_int_t i; ae_int_t j; double v; ae_int_t n; n = s->n; rmatrixsetlengthatleast(a, n, n, _state); if( ae_fp_greater(s->alpha,(double)(0)) ) { v = s->alpha; for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { a->ptr.pp_double[i][j] = v*s->a.ptr.pp_double[i][j]; } } } else { for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { a->ptr.pp_double[i][j] = 0.0; } } } } /************************************************************************* This subroutine rewrites diagonal of the main quadratic term of the model (dense A) by vector Z/Alpha (current value of the Alpha coefficient is used). IMPORTANT: in case model has no dense quadratic term, this function allocates N*N dense matrix of zeros, and fills its diagonal by non-zero values. 
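(In that case Alpha is also set to 1.0, so after the call the effective main
quadratic term is exactly 0.5*x'*diag(Z)*x: each stored element A[i,i] equals
Z[i]/Alpha, and the model evaluates Alpha*A[i,i]=Z[i] on the diagonal.)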
INPUT PARAMETERS: S - model Z - new diagonal, array[N] -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmrewritedensediagonal(convexquadraticmodel* s, /* Real */ ae_vector* z, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; n = s->n; if( ae_fp_eq(s->alpha,(double)(0)) ) { rmatrixsetlengthatleast(&s->a, s->n, s->n, _state); rmatrixsetlengthatleast(&s->ecadense, s->n, s->n, _state); rmatrixsetlengthatleast(&s->tq2dense, s->n, s->n, _state); for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { s->a.ptr.pp_double[i][j] = 0.0; } } s->alpha = 1.0; } for(i=0; i<=s->n-1; i++) { s->a.ptr.pp_double[i][i] = z->ptr.p_double[i]/s->alpha; } s->ismaintermchanged = ae_true; } /************************************************************************* This subroutine changes diagonal quadratic term of the model. INPUT PARAMETERS: S - model D - array[N], semidefinite diagonal matrix Tau - multiplier; when Tau=0, D is not referenced at all -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmsetd(convexquadraticmodel* s, /* Real */ ae_vector* d, double tau, ae_state *_state) { ae_int_t i; ae_assert(ae_isfinite(tau, _state)&&ae_fp_greater_eq(tau,(double)(0)), "CQMSetD: Tau<0 or is not finite number", _state); ae_assert(ae_fp_eq(tau,(double)(0))||isfinitevector(d, s->n, _state), "CQMSetD: D is not finite Nx1 vector", _state); s->tau = tau; if( ae_fp_greater(tau,(double)(0)) ) { rvectorsetlengthatleast(&s->d, s->n, _state); rvectorsetlengthatleast(&s->ecadiag, s->n, _state); rvectorsetlengthatleast(&s->tq2diag, s->n, _state); for(i=0; i<=s->n-1; i++) { ae_assert(ae_fp_greater_eq(d->ptr.p_double[i],(double)(0)), "CQMSetD: D[i]<0", _state); s->d.ptr.p_double[i] = d->ptr.p_double[i]; } } s->ismaintermchanged = ae_true; } /************************************************************************* This subroutine drops main quadratic term A from the model. It is same as call to CQMSetA() with zero A, but gives better performance because algorithm knows that matrix is zero and can optimize subsequent calculations. 
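(Equivalently, cqmdropa(s, _state) leaves the model in the same state as a
call to CQMSetA() with Alpha=0 would.)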
INPUT PARAMETERS: S - model -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmdropa(convexquadraticmodel* s, ae_state *_state) { s->alpha = 0.0; s->ismaintermchanged = ae_true; } /************************************************************************* This subroutine changes linear term of the model -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmsetb(convexquadraticmodel* s, /* Real */ ae_vector* b, ae_state *_state) { ae_int_t i; ae_assert(isfinitevector(b, s->n, _state), "CQMSetB: B is not finite vector", _state); rvectorsetlengthatleast(&s->b, s->n, _state); for(i=0; i<=s->n-1; i++) { s->b.ptr.p_double[i] = b->ptr.p_double[i]; } s->islineartermchanged = ae_true; } /************************************************************************* This subroutine changes secondary (rank-K) quadratic term of the model -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmsetq(convexquadraticmodel* s, /* Real */ ae_matrix* q, /* Real */ ae_vector* r, ae_int_t k, double theta, ae_state *_state) { ae_int_t i; ae_int_t j; ae_assert(k>=0, "CQMSetQ: K<0", _state); ae_assert((k==0||ae_fp_eq(theta,(double)(0)))||apservisfinitematrix(q, k, s->n, _state), "CQMSetQ: Q is not finite matrix", _state); ae_assert((k==0||ae_fp_eq(theta,(double)(0)))||isfinitevector(r, k, _state), "CQMSetQ: R is not finite vector", _state); ae_assert(ae_isfinite(theta, _state)&&ae_fp_greater_eq(theta,(double)(0)), "CQMSetQ: Theta<0 or is not finite number", _state); /* * degenerate case: K=0 or Theta=0 */ if( k==0||ae_fp_eq(theta,(double)(0)) ) { s->k = 0; s->theta = (double)(0); s->issecondarytermchanged = ae_true; return; } /* * General case: both Theta>0 and K>0 */ s->k = k; s->theta = theta; rmatrixsetlengthatleast(&s->q, s->k, s->n, _state); rvectorsetlengthatleast(&s->r, s->k, _state); rmatrixsetlengthatleast(&s->eq, s->k, s->n, _state); rmatrixsetlengthatleast(&s->eccm, s->k, s->k, _state); rmatrixsetlengthatleast(&s->tk2, s->k, s->n, _state); for(i=0; i<=s->k-1; i++) { for(j=0; j<=s->n-1; j++) { s->q.ptr.pp_double[i][j] = q->ptr.pp_double[i][j]; } s->r.ptr.p_double[i] = r->ptr.p_double[i]; } s->issecondarytermchanged = ae_true; } /************************************************************************* This subroutine changes active set INPUT PARAMETERS S - model X - array[N], constraint values ActiveSet- array[N], active set. If ActiveSet[I]=True, then I-th variable is constrained to X[I].
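A small illustrative sketch (X and ActiveSet are caller-allocated vectors of
length N; here only variable 0 is fixed, at the value 1.0):

    for(i=0; i<=n-1; i++)
        activeset.ptr.p_bool[i] = ae_false;
    activeset.ptr.p_bool[0] = ae_true;
    x.ptr.p_double[0] = 1.0;
    cqmsetactiveset(&s, &x, &activeset, _state);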
-- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmsetactiveset(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Boolean */ ae_vector* activeset, ae_state *_state) { ae_int_t i; ae_assert(x->cnt>=s->n, "CQMSetActiveSet: Length(X)<N", _state); ae_assert(activeset->cnt>=s->n, "CQMSetActiveSet: Length(ActiveSet)<N", _state); for(i=0; i<=s->n-1; i++) { s->isactivesetchanged = s->isactivesetchanged||(s->activeset.ptr.p_bool[i]&&!activeset->ptr.p_bool[i]); s->isactivesetchanged = s->isactivesetchanged||(activeset->ptr.p_bool[i]&&!s->activeset.ptr.p_bool[i]); s->activeset.ptr.p_bool[i] = activeset->ptr.p_bool[i]; if( activeset->ptr.p_bool[i] ) { ae_assert(ae_isfinite(x->ptr.p_double[i], _state), "CQMSetActiveSet: X[] contains infinite constraints", _state); s->isactivesetchanged = s->isactivesetchanged||ae_fp_neq(s->xc.ptr.p_double[i],x->ptr.p_double[i]); s->xc.ptr.p_double[i] = x->ptr.p_double[i]; } } } /************************************************************************* This subroutine evaluates model at X. Active constraints are ignored. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ double cqmeval(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; double v; double result; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMEval: X is not finite vector", _state); result = 0.0; /* * main quadratic term */ if( ae_fp_greater(s->alpha,(double)(0)) ) { for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { result = result+s->alpha*0.5*x->ptr.p_double[i]*s->a.ptr.pp_double[i][j]*x->ptr.p_double[j]; } } } if( ae_fp_greater(s->tau,(double)(0)) ) { for(i=0; i<=n-1; i++) { result = result+0.5*ae_sqr(x->ptr.p_double[i], _state)*s->tau*s->d.ptr.p_double[i]; } } /* * secondary quadratic term */ if( ae_fp_greater(s->theta,(double)(0)) ) { for(i=0; i<=s->k-1; i++) { v = ae_v_dotproduct(&s->q.ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); result = result+0.5*s->theta*ae_sqr(v-s->r.ptr.p_double[i], _state); } } /* * linear term */ for(i=0; i<=s->n-1; i++) { result = result+x->ptr.p_double[i]*s->b.ptr.p_double[i]; } return result; } /************************************************************************* This subroutine evaluates model at X. Active constraints are ignored. It returns: R - model value Noise- estimate of the numerical noise in data -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmevalx(convexquadraticmodel* s, /* Real */ ae_vector* x, double* r, double* noise, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; double v; double v2; double mxq; double eps; *r = 0; *noise = 0; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMEval: X is not finite vector", _state); *r = 0.0; *noise = 0.0; eps = 2*ae_machineepsilon; mxq = 0.0; /* * Main quadratic term. * * Noise from the main quadratic term is equal to the * maximum summand in the term.
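* (Each summand v contributes at most Eps*|v| of noise, where
* Eps = 2*machine epsilon; the per-summand maxima are multiplied
* by N in the final update at the end of this function.)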
*/ if( ae_fp_greater(s->alpha,(double)(0)) ) { for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { v = s->alpha*0.5*x->ptr.p_double[i]*s->a.ptr.pp_double[i][j]*x->ptr.p_double[j]; *r = *r+v; *noise = ae_maxreal(*noise, eps*ae_fabs(v, _state), _state); } } } if( ae_fp_greater(s->tau,(double)(0)) ) { for(i=0; i<=n-1; i++) { v = 0.5*ae_sqr(x->ptr.p_double[i], _state)*s->tau*s->d.ptr.p_double[i]; *r = *r+v; *noise = ae_maxreal(*noise, eps*ae_fabs(v, _state), _state); } } /* * secondary quadratic term * * Noise from the secondary quadratic term is estimated as follows: * * noise in qi*x-r[i] is estimated as * Eps*MXQ = Eps*max(|r[i]|, |q[i,j]*x[j]|) * * noise in (qi*x-r[i])^2 is estimated as * NOISE = (|qi*x-r[i]|+Eps*MXQ)^2-(|qi*x-r[i]|)^2 * = Eps*MXQ*(2*|qi*x-r[i]|+Eps*MXQ) */ if( ae_fp_greater(s->theta,(double)(0)) ) { for(i=0; i<=s->k-1; i++) { v = 0.0; mxq = ae_fabs(s->r.ptr.p_double[i], _state); for(j=0; j<=n-1; j++) { v2 = s->q.ptr.pp_double[i][j]*x->ptr.p_double[j]; v = v+v2; mxq = ae_maxreal(mxq, ae_fabs(v2, _state), _state); } *r = *r+0.5*s->theta*ae_sqr(v-s->r.ptr.p_double[i], _state); *noise = ae_maxreal(*noise, eps*mxq*(2*ae_fabs(v-s->r.ptr.p_double[i], _state)+eps*mxq), _state); } } /* * linear term */ for(i=0; i<=s->n-1; i++) { *r = *r+x->ptr.p_double[i]*s->b.ptr.p_double[i]; *noise = ae_maxreal(*noise, eps*ae_fabs(x->ptr.p_double[i]*s->b.ptr.p_double[i], _state), _state); } /* * Final update of the noise */ *noise = n*(*noise); } /************************************************************************* This subroutine evaluates gradient of the model; active constraints are ignored. INPUT PARAMETERS: S - convex model X - point, array[N] G - possibly preallocated buffer; resized, if too small -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmgradunconstrained(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; double v; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMEvalGradUnconstrained: X is not finite vector", _state); rvectorsetlengthatleast(g, n, _state); for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = (double)(0); } /* * main quadratic term */ if( ae_fp_greater(s->alpha,(double)(0)) ) { for(i=0; i<=n-1; i++) { v = 0.0; for(j=0; j<=n-1; j++) { v = v+s->alpha*s->a.ptr.pp_double[i][j]*x->ptr.p_double[j]; } g->ptr.p_double[i] = g->ptr.p_double[i]+v; } } if( ae_fp_greater(s->tau,(double)(0)) ) { for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = g->ptr.p_double[i]+x->ptr.p_double[i]*s->tau*s->d.ptr.p_double[i]; } } /* * secondary quadratic term */ if( ae_fp_greater(s->theta,(double)(0)) ) { for(i=0; i<=s->k-1; i++) { v = ae_v_dotproduct(&s->q.ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); v = s->theta*(v-s->r.ptr.p_double[i]); ae_v_addd(&g->ptr.p_double[0], 1, &s->q.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } } /* * linear term */ for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = g->ptr.p_double[i]+s->b.ptr.p_double[i]; } } /************************************************************************* This subroutine evaluates x'*(0.5*alpha*A+tau*D)*x NOTE: Tmp[] must be preallocated array whose length is at least N -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ double cqmxtadx2(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Real */ ae_vector* tmp, ae_state *_state) { ae_int_t n; ae_int_t i; double result; n = s->n; 
ae_assert(isfinitevector(x, n, _state), "CQMXTADX2: X is not finite vector", _state); ae_assert(tmp->cnt>=n, "CQMXTADX2: Length(Tmp)alpha,(double)(0)) ) { result = result+s->alpha*0.5*rmatrixsyvmv(n, &s->a, 0, 0, ae_true, x, 0, tmp, _state); } if( ae_fp_greater(s->tau,(double)(0)) ) { for(i=0; i<=n-1; i++) { result = result+0.5*ae_sqr(x->ptr.p_double[i], _state)*s->tau*s->d.ptr.p_double[i]; } } return result; } /************************************************************************* This subroutine evaluates (0.5*alpha*A+tau*D)*x Y is automatically resized if needed -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmadx(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Real */ ae_vector* y, ae_state *_state) { ae_int_t n; ae_int_t i; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMEval: X is not finite vector", _state); rvectorsetlengthatleast(y, n, _state); /* * main quadratic term */ for(i=0; i<=n-1; i++) { y->ptr.p_double[i] = (double)(0); } if( ae_fp_greater(s->alpha,(double)(0)) ) { rmatrixsymv(n, s->alpha, &s->a, 0, 0, ae_true, x, 0, 1.0, y, 0, _state); } if( ae_fp_greater(s->tau,(double)(0)) ) { for(i=0; i<=n-1; i++) { y->ptr.p_double[i] = y->ptr.p_double[i]+x->ptr.p_double[i]*s->tau*s->d.ptr.p_double[i]; } } } /************************************************************************* This subroutine finds optimum of the model. It returns False on failure (indefinite/semidefinite matrix). Optimum is found subject to active constraints. INPUT PARAMETERS S - model X - possibly preallocated buffer; automatically resized, if too small enough. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ ae_bool cqmconstrainedoptimum(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t nfree; ae_int_t k; ae_int_t i; double v; ae_int_t cidx0; ae_int_t itidx; ae_bool result; /* * Rebuild internal structures */ if( !cqmodels_cqmrebuild(s, _state) ) { result = ae_false; return result; } n = s->n; k = s->k; nfree = s->nfree; result = ae_true; /* * Calculate initial point for the iterative refinement: * * free components are set to zero * * constrained components are set to their constrained values */ rvectorsetlengthatleast(x, n, _state); for(i=0; i<=n-1; i++) { if( s->activeset.ptr.p_bool[i] ) { x->ptr.p_double[i] = s->xc.ptr.p_double[i]; } else { x->ptr.p_double[i] = (double)(0); } } /* * Iterative refinement. * * In an ideal world without numerical errors it would be enough * to make just one Newton step from initial point: * x_new = -H^(-1)*grad(x=0) * However, roundoff errors can significantly deteriorate quality * of the solution. So we have to recalculate gradient and to * perform Newton steps several times. * * Below we perform fixed number of Newton iterations. */ for(itidx=0; itidx<=cqmodels_newtonrefinementits-1; itidx++) { /* * Calculate gradient at the current point. * Move free components of the gradient in the beginning. */ cqmgradunconstrained(s, x, &s->tmpg, _state); cidx0 = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { s->tmpg.ptr.p_double[cidx0] = s->tmpg.ptr.p_double[i]; cidx0 = cidx0+1; } } /* * Free components of the extrema are calculated in the first NFree elements of TXC. 
* * First, we have to calculate original Newton step, without rank-K perturbations */ ae_v_moveneg(&s->txc.ptr.p_double[0], 1, &s->tmpg.ptr.p_double[0], 1, ae_v_len(0,nfree-1)); cqmodels_cqmsolveea(s, &s->txc, &s->tmp0, _state); /* * Then, we account for rank-K correction. * Woodbury matrix identity is used. */ if( s->k>0&&ae_fp_greater(s->theta,(double)(0)) ) { rvectorsetlengthatleast(&s->tmp0, ae_maxint(nfree, k, _state), _state); rvectorsetlengthatleast(&s->tmp1, ae_maxint(nfree, k, _state), _state); ae_v_moveneg(&s->tmp1.ptr.p_double[0], 1, &s->tmpg.ptr.p_double[0], 1, ae_v_len(0,nfree-1)); cqmodels_cqmsolveea(s, &s->tmp1, &s->tmp0, _state); for(i=0; i<=k-1; i++) { v = ae_v_dotproduct(&s->eq.ptr.pp_double[i][0], 1, &s->tmp1.ptr.p_double[0], 1, ae_v_len(0,nfree-1)); s->tmp0.ptr.p_double[i] = v; } fblscholeskysolve(&s->eccm, 1.0, k, ae_true, &s->tmp0, &s->tmp1, _state); for(i=0; i<=nfree-1; i++) { s->tmp1.ptr.p_double[i] = 0.0; } for(i=0; i<=k-1; i++) { v = s->tmp0.ptr.p_double[i]; ae_v_addd(&s->tmp1.ptr.p_double[0], 1, &s->eq.ptr.pp_double[i][0], 1, ae_v_len(0,nfree-1), v); } cqmodels_cqmsolveea(s, &s->tmp1, &s->tmp0, _state); ae_v_sub(&s->txc.ptr.p_double[0], 1, &s->tmp1.ptr.p_double[0], 1, ae_v_len(0,nfree-1)); } /* * Unpack components from TXC into X. We pass through all * free components of X and add our step. */ cidx0 = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { x->ptr.p_double[i] = x->ptr.p_double[i]+s->txc.ptr.p_double[cidx0]; cidx0 = cidx0+1; } } } return result; } /************************************************************************* This function scales vector by multiplying it by inverse of the diagonal of the Hessian matrix. It should be used to accelerate steepest descent phase of the QP solver. Although it is called "scale-grad", it can be called for any vector, whether it is gradient, anti-gradient, or just some vector. This function does NOT takes into account current set of constraints, it just performs matrix-vector multiplication without taking into account constraints. INPUT PARAMETERS: S - model X - vector to scale OUTPUT PARAMETERS: X - scaled vector NOTE: when called for non-SPD matrices, it silently skips components of X which correspond to zero or negative diagonal elements. NOTE: this function uses diagonals of A and D; it ignores Q - rank-K term of the quadratic model. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ void cqmscalevector(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t i; double v; n = s->n; for(i=0; i<=n-1; i++) { v = 0.0; if( ae_fp_greater(s->alpha,(double)(0)) ) { v = v+s->a.ptr.pp_double[i][i]; } if( ae_fp_greater(s->tau,(double)(0)) ) { v = v+s->d.ptr.p_double[i]; } if( ae_fp_greater(v,(double)(0)) ) { x->ptr.p_double[i] = x->ptr.p_double[i]/v; } } } /************************************************************************* This function returns diagonal of the A-term. 
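A minimal usage sketch (the buffer name "diag" is purely illustrative, not part of the API): given an ae_vector diag initialized with ae_vector_init(&diag, 0, DT_REAL, _state, make_automatic), the call cqmgetdiaga(s, &diag, _state); leaves diag[i] equal to A[i][i] when Alpha>0 and to 0 otherwise.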
INPUT PARAMETERS: S - model OUTPUT PARAMETERS: D - diagonal of the A (or zero) -- ALGLIB -- Copyright 26.12.2017 by Bochkanov Sergey *************************************************************************/ void cqmgetdiaga(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t i; n = s->n; rvectorsetlengthatleast(x, n, _state); for(i=0; i<=n-1; i++) { if( ae_fp_greater(s->alpha,(double)(0)) ) { x->ptr.p_double[i] = s->a.ptr.pp_double[i][i]; } else { x->ptr.p_double[i] = (double)(0); } } } /************************************************************************* This subroutine calls CQMRebuild() and evaluates model at X subject to active constraints. It is intended for debug purposes only, because it evaluates model by means of temporaries, which were calculated by CQMRebuild(). The only purpose of this function is to check correctness of CQMRebuild() by comparing results of this function with ones obtained by CQMEval(), which is used as reference point. The idea is that significant deviation in results of these two functions is evidence of some error in the CQMRebuild(). NOTE: suffix T denotes that temporaries marked by T-prefix are used. There is one more variant of this function, which uses "effective" model built by CQMRebuild(). NOTE2: in case CQMRebuild() fails (due to model non-convexity), this function returns NAN. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ double cqmdebugconstrainedevalt(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t nfree; ae_int_t i; ae_int_t j; double v; double result; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMDebugConstrainedEvalT: X is not finite vector", _state); if( !cqmodels_cqmrebuild(s, _state) ) { result = _state->v_nan; return result; } result = 0.0; nfree = s->nfree; /* * Reorder variables */ j = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { ae_assert(j<nfree, "CQMDebugConstrainedEvalT: internal error", _state); s->txc.ptr.p_double[j] = x->ptr.p_double[i]; j = j+1; } } /* * TQ2, TQ1, TQ0 * */ if( ae_fp_greater(s->alpha,(double)(0)) ) { /* * Dense TQ2 */ for(i=0; i<=nfree-1; i++) { for(j=0; j<=nfree-1; j++) { result = result+0.5*s->txc.ptr.p_double[i]*s->tq2dense.ptr.pp_double[i][j]*s->txc.ptr.p_double[j]; } } } else { /* * Diagonal TQ2 */ for(i=0; i<=nfree-1; i++) { result = result+0.5*s->tq2diag.ptr.p_double[i]*ae_sqr(s->txc.ptr.p_double[i], _state); } } for(i=0; i<=nfree-1; i++) { result = result+s->tq1.ptr.p_double[i]*s->txc.ptr.p_double[i]; } result = result+s->tq0; /* * TK2, TK1, TK0 */ if( s->k>0&&ae_fp_greater(s->theta,(double)(0)) ) { for(i=0; i<=s->k-1; i++) { v = (double)(0); for(j=0; j<=nfree-1; j++) { v = v+s->tk2.ptr.pp_double[i][j]*s->txc.ptr.p_double[j]; } result = result+0.5*ae_sqr(v, _state); } for(i=0; i<=nfree-1; i++) { result = result+s->tk1.ptr.p_double[i]*s->txc.ptr.p_double[i]; } result = result+s->tk0; } /* * TB (Bf and Bc parts) */ for(i=0; i<=n-1; i++) { result = result+s->tb.ptr.p_double[i]*s->txc.ptr.p_double[i]; } return result; } /************************************************************************* This subroutine calls CQMRebuild() and evaluates model at X subject to active constraints. It is intended for debug purposes only, because it evaluates model by means of "effective" matrices built by CQMRebuild(). The only purpose of this function is to check correctness of CQMRebuild() by comparing results of this function with ones obtained by CQMEval(), which is used as reference point.
The idea is that significant deviation in results of these two functions is evidence of some error in the CQMRebuild(). NOTE: suffix E denotes that "effective" matrices are used. There is one more variant of this function, which uses temporary matrices built by CQMRebuild(). NOTE2: in case CQMRebuild() fails (due to model non-convexity), this function returns NAN. -- ALGLIB -- Copyright 12.06.2012 by Bochkanov Sergey *************************************************************************/ double cqmdebugconstrainedevale(convexquadraticmodel* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t nfree; ae_int_t i; ae_int_t j; double v; double result; n = s->n; ae_assert(isfinitevector(x, n, _state), "CQMDebugConstrainedEvalE: X is not finite vector", _state); if( !cqmodels_cqmrebuild(s, _state) ) { result = _state->v_nan; return result; } result = 0.0; nfree = s->nfree; /* * Reorder variables */ j = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { ae_assert(j<nfree, "CQMDebugConstrainedEvalE: internal error", _state); s->txc.ptr.p_double[j] = x->ptr.p_double[i]; j = j+1; } } /* * ECA */ ae_assert((s->ecakind==0||s->ecakind==1)||(s->ecakind==-1&&nfree==0), "CQMDebugConstrainedEvalE: unexpected ECAKind", _state); if( s->ecakind==0 ) { /* * Dense ECA */ for(i=0; i<=nfree-1; i++) { v = 0.0; for(j=i; j<=nfree-1; j++) { v = v+s->ecadense.ptr.pp_double[i][j]*s->txc.ptr.p_double[j]; } result = result+0.5*ae_sqr(v, _state); } } if( s->ecakind==1 ) { /* * Diagonal ECA */ for(i=0; i<=nfree-1; i++) { result = result+0.5*ae_sqr(s->ecadiag.ptr.p_double[i]*s->txc.ptr.p_double[i], _state); } } /* * EQ */ for(i=0; i<=s->k-1; i++) { v = 0.0; for(j=0; j<=nfree-1; j++) { v = v+s->eq.ptr.pp_double[i][j]*s->txc.ptr.p_double[j]; } result = result+0.5*ae_sqr(v, _state); } /* * EB */ for(i=0; i<=nfree-1; i++) { result = result+s->eb.ptr.p_double[i]*s->txc.ptr.p_double[i]; } /* * EC */ result = result+s->ec; return result; } /************************************************************************* Internal function, rebuilds "effective" model subject to constraints. Returns False on failure (non-SPD main quadratic term) -- ALGLIB -- Copyright 10.05.2011 by Bochkanov Sergey *************************************************************************/ static ae_bool cqmodels_cqmrebuild(convexquadraticmodel* s, ae_state *_state) { ae_int_t n; ae_int_t nfree; ae_int_t k; ae_int_t i; ae_int_t j; ae_int_t ridx0; ae_int_t ridx1; ae_int_t cidx0; ae_int_t cidx1; double v; ae_bool result; if( ae_fp_eq(s->alpha,(double)(0))&&ae_fp_eq(s->tau,(double)(0)) ) { /* * Non-SPD model, quick exit */ result = ae_false; return result; } result = ae_true; n = s->n; k = s->k; /* * Determine number of free variables. * Fill TXC - array whose last N-NFree elements store constraints. */ if( s->isactivesetchanged ) { s->nfree = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { s->nfree = s->nfree+1; } } j = s->nfree; for(i=0; i<=n-1; i++) { if( s->activeset.ptr.p_bool[i] ) { s->txc.ptr.p_double[j] = s->xc.ptr.p_double[i]; j = j+1; } } } nfree = s->nfree; /* * Re-evaluate TQ2/TQ1/TQ0, if needed */ if( s->isactivesetchanged||s->ismaintermchanged ) { /* * Handle cases Alpha>0 and Alpha=0 separately: * * in the first case we have dense matrix * * in the second one we have diagonal matrix, which can be * handled more efficiently */ if( ae_fp_greater(s->alpha,(double)(0)) ) { /* * Alpha>0, dense QP * * Split variables into two groups - free (F) and constrained (C). Reorder * variables in such way that free vars come first, constrained are last: * x = [xf, xc].
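* * (For illustration: assuming N=3 and ActiveSet = {false,true,false}, the free variables are x0 and x2 and the constrained one is x1, so NFree=2 and the reordered vector is x = [xf, xc] = [x0, x2, x1].)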
* * Main quadratic term x'*(alpha*A+tau*D)*x now splits into quadratic part, * linear part and constant part: * ( alpha*Aff+tau*Df alpha*Afc ) ( xf ) * 0.5*( xf' xc' )*( )*( ) = * ( alpha*Acf alpha*Acc+tau*Dc ) ( xc ) * * = 0.5*xf'*(alpha*Aff+tau*Df)*xf + (alpha*Afc*xc)'*xf + 0.5*xc'(alpha*Acc+tau*Dc)*xc * * We store these parts into temporary variables: * * alpha*Aff+tau*Df, alpha*Afc, alpha*Acc+tau*Dc are stored into upper * triangle of TQ2 * * alpha*Afc*xc is stored into TQ1 * * 0.5*xc'(alpha*Acc+tau*Dc)*xc is stored into TQ0 * * Below comes first part of the work - generation of TQ2: * * we pass through rows of A and copy I-th row into upper block (Aff/Afc) or * lower one (Acf/Acc) of TQ2, depending on presence of X[i] in the active set. * RIdx0 variable contains current position for insertion into upper block, * RIdx1 contains current position for insertion into lower one. * * within each row, we copy J-th element into left half (Aff/Acf) or right * one (Afc/Acc), depending on presence of X[j] in the active set. CIdx0 * contains current position for insertion into left block, CIdx1 contains * position for insertion into right one. * * during copying, we multiply elements by alpha and add diagonal matrix D. */ ridx0 = 0; ridx1 = s->nfree; for(i=0; i<=n-1; i++) { cidx0 = 0; cidx1 = s->nfree; for(j=0; j<=n-1; j++) { if( !s->activeset.ptr.p_bool[i]&&!s->activeset.ptr.p_bool[j] ) { /* * Element belongs to Aff */ v = s->alpha*s->a.ptr.pp_double[i][j]; if( i==j&&ae_fp_greater(s->tau,(double)(0)) ) { v = v+s->tau*s->d.ptr.p_double[i]; } s->tq2dense.ptr.pp_double[ridx0][cidx0] = v; } if( !s->activeset.ptr.p_bool[i]&&s->activeset.ptr.p_bool[j] ) { /* * Element belongs to Afc */ s->tq2dense.ptr.pp_double[ridx0][cidx1] = s->alpha*s->a.ptr.pp_double[i][j]; } if( s->activeset.ptr.p_bool[i]&&!s->activeset.ptr.p_bool[j] ) { /* * Element belongs to Acf */ s->tq2dense.ptr.pp_double[ridx1][cidx0] = s->alpha*s->a.ptr.pp_double[i][j]; } if( s->activeset.ptr.p_bool[i]&&s->activeset.ptr.p_bool[j] ) { /* * Element belongs to Acc */ v = s->alpha*s->a.ptr.pp_double[i][j]; if( i==j&&ae_fp_greater(s->tau,(double)(0)) ) { v = v+s->tau*s->d.ptr.p_double[i]; } s->tq2dense.ptr.pp_double[ridx1][cidx1] = v; } if( s->activeset.ptr.p_bool[j] ) { cidx1 = cidx1+1; } else { cidx0 = cidx0+1; } } if( s->activeset.ptr.p_bool[i] ) { ridx1 = ridx1+1; } else { ridx0 = ridx0+1; } } /* * Now we have TQ2, and we can evaluate TQ1. * In the special case when we have Alpha=0, NFree=0 or NFree=N, * TQ1 is filled by zeros. */ for(i=0; i<=n-1; i++) { s->tq1.ptr.p_double[i] = 0.0; } if( s->nfree>0&&s->nfree<n ) { rmatrixmv(s->nfree, n-s->nfree, &s->tq2dense, 0, s->nfree, 0, &s->txc, s->nfree, &s->tq1, 0, _state); } /* * And finally, we evaluate TQ0. */ v = 0.0; for(i=s->nfree; i<=n-1; i++) { for(j=s->nfree; j<=n-1; j++) { v = v+0.5*s->txc.ptr.p_double[i]*s->tq2dense.ptr.pp_double[i][j]*s->txc.ptr.p_double[j]; } } s->tq0 = v; } else { /* * Alpha=0, diagonal QP * * Split variables into two groups - free (F) and constrained (C). Reorder * variables in such way that free vars come first, constrained are last: * x = [xf, xc].
* * Main quadratic term x'*(tau*D)*x now splits into quadratic and constant * parts: * ( tau*Df ) ( xf ) * 0.5*( xf' xc' )*( )*( ) = * ( tau*Dc ) ( xc ) * * = 0.5*xf'*(tau*Df)*xf + 0.5*xc'(tau*Dc)*xc * * We store these parts into temporary variables: * * tau*Df is stored in TQ2Diag * * 0.5*xc'(tau*Dc)*xc is stored into TQ0 */ s->tq0 = 0.0; ridx0 = 0; for(i=0; i<=n-1; i++) { if( !s->activeset.ptr.p_bool[i] ) { s->tq2diag.ptr.p_double[ridx0] = s->tau*s->d.ptr.p_double[i]; ridx0 = ridx0+1; } else { s->tq0 = s->tq0+0.5*s->tau*s->d.ptr.p_double[i]*ae_sqr(s->xc.ptr.p_double[i], _state); } } for(i=0; i<=n-1; i++) { s->tq1.ptr.p_double[i] = 0.0; } } } /* * Re-evaluate TK2/TK1/TK0, if needed */ if( s->isactivesetchanged||s->issecondarytermchanged ) { /* * Split variables into two groups - free (F) and constrained (C). Reorder * variables in such way that free vars come first, constrained are last: * x = [xf, xc]. * * Secondary term theta*(Q*x-r)'*(Q*x-r) now splits into quadratic part, * linear part and constant part: * ( ( xf ) )' ( ( xf ) ) * 0.5*theta*( (Qf Qc)'*( ) - r ) * ( (Qf Qc)'*( ) - r ) = * ( ( xc ) ) ( ( xc ) ) * * = 0.5*theta*xf'*(Qf'*Qf)*xf + theta*((Qc*xc-r)'*Qf)*xf + * + theta*(-r'*(Qc*xc-r)-0.5*r'*r+0.5*xc'*Qc'*Qc*xc) * * We store these parts into temporary variables: * * sqrt(theta)*Qf is stored into TK2 * * theta*((Qc*xc-r)'*Qf) is stored into TK1 * * theta*(-r'*(Qc*xc-r)-0.5*r'*r+0.5*xc'*Qc'*Qc*xc) is stored into TK0 * * We use several other temporaries to store intermediate results: * * Tmp0 - to store Qc*xc-r * * Tmp1 - to store Qc*xc * * Generation of TK2/TK1/TK0 is performed as follows: * * we fill TK2/TK1/TK0 (to handle K=0 or Theta=0) * * other steps are performed only for K>0 and Theta>0 * * we pass through columns of Q and copy I-th column into left block (Qf) or * right one (Qc) of TK2, depending on presence of X[i] in the active set. * CIdx0 variable contains current position for insertion into upper block, * CIdx1 contains current position for insertion into lower one. * * we calculate Qc*xc-r and store it into Tmp0 * * we calculate TK0 and TK1 * * we multiply leading part of TK2 which stores Qf by sqrt(theta) * it is important to perform this step AFTER calculation of TK0 and TK1, * because we need original (non-modified) Qf to calculate TK0 and TK1. 
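* * (Consistency note: since -r'*(Qc*xc-r) - 0.5*r'*r + 0.5*(Qc*xc)'*(Qc*xc) = 0.5*|Qc*xc-r|^2, the constant TK0 equals 0.5*theta*|Qc*xc-r|^2, i.e. the secondary term evaluated at the constrained components alone.)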
*/ for(j=0; j<=n-1; j++) { for(i=0; i<=k-1; i++) { s->tk2.ptr.pp_double[i][j] = 0.0; } s->tk1.ptr.p_double[j] = 0.0; } s->tk0 = 0.0; if( s->k>0&&ae_fp_greater(s->theta,(double)(0)) ) { /* * Split Q into Qf and Qc * Calculate Qc*xc-r, store in Tmp0 */ rvectorsetlengthatleast(&s->tmp0, k, _state); rvectorsetlengthatleast(&s->tmp1, k, _state); cidx0 = 0; cidx1 = nfree; for(i=0; i<=k-1; i++) { s->tmp1.ptr.p_double[i] = 0.0; } for(j=0; j<=n-1; j++) { if( s->activeset.ptr.p_bool[j] ) { for(i=0; i<=k-1; i++) { s->tk2.ptr.pp_double[i][cidx1] = s->q.ptr.pp_double[i][j]; s->tmp1.ptr.p_double[i] = s->tmp1.ptr.p_double[i]+s->q.ptr.pp_double[i][j]*s->txc.ptr.p_double[cidx1]; } cidx1 = cidx1+1; } else { for(i=0; i<=k-1; i++) { s->tk2.ptr.pp_double[i][cidx0] = s->q.ptr.pp_double[i][j]; } cidx0 = cidx0+1; } } for(i=0; i<=k-1; i++) { s->tmp0.ptr.p_double[i] = s->tmp1.ptr.p_double[i]-s->r.ptr.p_double[i]; } /* * Calculate TK0 */ v = 0.0; for(i=0; i<=k-1; i++) { v = v+s->theta*(0.5*ae_sqr(s->tmp1.ptr.p_double[i], _state)-s->r.ptr.p_double[i]*s->tmp0.ptr.p_double[i]-0.5*ae_sqr(s->r.ptr.p_double[i], _state)); } s->tk0 = v; /* * Calculate TK1 */ if( nfree>0 ) { for(i=0; i<=k-1; i++) { v = s->theta*s->tmp0.ptr.p_double[i]; ae_v_addd(&s->tk1.ptr.p_double[0], 1, &s->tk2.ptr.pp_double[i][0], 1, ae_v_len(0,nfree-1), v); } } /* * Calculate TK2 */ if( nfree>0 ) { v = ae_sqrt(s->theta, _state); for(i=0; i<=k-1; i++) { ae_v_muld(&s->tk2.ptr.pp_double[i][0], 1, ae_v_len(0,nfree-1), v); } } } } /* * Re-evaluate TB */ if( s->isactivesetchanged||s->islineartermchanged ) { ridx0 = 0; ridx1 = nfree; for(i=0; i<=n-1; i++) { if( s->activeset.ptr.p_bool[i] ) { s->tb.ptr.p_double[ridx1] = s->b.ptr.p_double[i]; ridx1 = ridx1+1; } else { s->tb.ptr.p_double[ridx0] = s->b.ptr.p_double[i]; ridx0 = ridx0+1; } } } /* * Compose ECA: either dense ECA or diagonal ECA */ if( (s->isactivesetchanged||s->ismaintermchanged)&&nfree>0 ) { if( ae_fp_greater(s->alpha,(double)(0)) ) { /* * Dense ECA */ s->ecakind = 0; for(i=0; i<=nfree-1; i++) { for(j=i; j<=nfree-1; j++) { s->ecadense.ptr.pp_double[i][j] = s->tq2dense.ptr.pp_double[i][j]; } } if( !spdmatrixcholeskyrec(&s->ecadense, 0, nfree, ae_true, &s->tmp0, _state) ) { result = ae_false; return result; } } else { /* * Diagonal ECA */ s->ecakind = 1; for(i=0; i<=nfree-1; i++) { if( ae_fp_less(s->tq2diag.ptr.p_double[i],(double)(0)) ) { result = ae_false; return result; } s->ecadiag.ptr.p_double[i] = ae_sqrt(s->tq2diag.ptr.p_double[i], _state); } } } /* * Compose EQ */ if( s->isactivesetchanged||s->issecondarytermchanged ) { for(i=0; i<=k-1; i++) { for(j=0; j<=nfree-1; j++) { s->eq.ptr.pp_double[i][j] = s->tk2.ptr.pp_double[i][j]; } } } /* * Calculate ECCM */ if( ((((s->isactivesetchanged||s->ismaintermchanged)||s->issecondarytermchanged)&&s->k>0)&&ae_fp_greater(s->theta,(double)(0)))&&nfree>0 ) { /* * Calculate ECCM - Cholesky factor of the "effective" capacitance * matrix CM = I + EQ*inv(EffectiveA)*EQ'. * * We calculate CM as follows: * CM = I + EQ*inv(EffectiveA)*EQ' * = I + EQ*ECA^(-1)*ECA^(-T)*EQ' * = I + (EQ*ECA^(-1))*(EQ*ECA^(-1))' * * Then we perform Cholesky decomposition of CM. 
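* * (The factor computed here is consumed by CQMConstrainedOptimum(), which calls FBLSCholeskySolve() on ECCM to solve CM*y = EQ*EffectiveA^(-1)*rhs when applying the Woodbury correction to the Newton step.)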
*/ rmatrixsetlengthatleast(&s->tmp2, k, n, _state); rmatrixcopy(k, nfree, &s->eq, 0, 0, &s->tmp2, 0, 0, _state); ae_assert(s->ecakind==0||s->ecakind==1, "CQMRebuild: unexpected ECAKind", _state); if( s->ecakind==0 ) { rmatrixrighttrsm(k, nfree, &s->ecadense, 0, 0, ae_true, ae_false, 0, &s->tmp2, 0, 0, _state); } if( s->ecakind==1 ) { for(i=0; i<=k-1; i++) { for(j=0; j<=nfree-1; j++) { s->tmp2.ptr.pp_double[i][j] = s->tmp2.ptr.pp_double[i][j]/s->ecadiag.ptr.p_double[j]; } } } for(i=0; i<=k-1; i++) { for(j=0; j<=k-1; j++) { s->eccm.ptr.pp_double[i][j] = 0.0; } s->eccm.ptr.pp_double[i][i] = 1.0; } rmatrixsyrk(k, nfree, 1.0, &s->tmp2, 0, 0, 0, 1.0, &s->eccm, 0, 0, ae_true, _state); if( !spdmatrixcholeskyrec(&s->eccm, 0, k, ae_true, &s->tmp0, _state) ) { result = ae_false; return result; } } /* * Compose EB and EC * * NOTE: because these quantities are cheap to compute, we do not * use caching here. */ for(i=0; i<=nfree-1; i++) { s->eb.ptr.p_double[i] = s->tq1.ptr.p_double[i]+s->tk1.ptr.p_double[i]+s->tb.ptr.p_double[i]; } s->ec = s->tq0+s->tk0; for(i=nfree; i<=n-1; i++) { s->ec = s->ec+s->tb.ptr.p_double[i]*s->txc.ptr.p_double[i]; } /* * Change cache status - everything is cached */ s->ismaintermchanged = ae_false; s->issecondarytermchanged = ae_false; s->islineartermchanged = ae_false; s->isactivesetchanged = ae_false; return result; } /************************************************************************* Internal function, solves system Effective_A*x = b. It should be called after successful completion of CQMRebuild(). INPUT PARAMETERS: S - quadratic model, after call to CQMRebuild() X - right part B, array[S.NFree] Tmp - temporary array, automatically reallocated if needed OUTPUT PARAMETERS: X - solution, array[S.NFree] NOTE: when called with zero S.NFree, returns silently NOTE: this function assumes that EA is non-degenerate -- ALGLIB -- Copyright 10.05.2011 by Bochkanov Sergey *************************************************************************/ static void cqmodels_cqmsolveea(convexquadraticmodel* s, /* Real */ ae_vector* x, /* Real */ ae_vector* tmp, ae_state *_state) { ae_int_t i; ae_assert((s->ecakind==0||s->ecakind==1)||(s->ecakind==-1&&s->nfree==0), "CQMSolveEA: unexpected ECAKind", _state); if( s->ecakind==0 ) { /* * Dense ECA, use FBLSCholeskySolve() dense solver. 
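* * (Here EffectiveA = ECA'*ECA with ECA an upper triangular Cholesky factor, hence the call to FBLSCholeskySolve(); in the diagonal branch below EffectiveA = diag(ECADiag[i]^2), which is why each component is simply divided by ECADiag[i]^2.)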
*/ fblscholeskysolve(&s->ecadense, 1.0, s->nfree, ae_true, x, tmp, _state); } if( s->ecakind==1 ) { /* * Diagonal ECA */ for(i=0; i<=s->nfree-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]/ae_sqr(s->ecadiag.ptr.p_double[i], _state); } } } void _convexquadraticmodel_init(void* _p, ae_state *_state, ae_bool make_automatic) { convexquadraticmodel *p = (convexquadraticmodel*)_p; ae_touch_ptr((void*)p); ae_matrix_init(&p->a, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->q, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->r, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->activeset, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->tq2dense, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tk2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tq2diag, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tq1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tk1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->txc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tb, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->ecadense, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->eq, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->eccm, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ecadiag, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->eb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpg, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmp2, 0, 0, DT_REAL, _state, make_automatic); } void _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { convexquadraticmodel *dst = (convexquadraticmodel*)_dst; convexquadraticmodel *src = (convexquadraticmodel*)_src; dst->n = src->n; dst->k = src->k; dst->alpha = src->alpha; dst->tau = src->tau; dst->theta = src->theta; ae_matrix_init_copy(&dst->a, &src->a, _state, make_automatic); ae_matrix_init_copy(&dst->q, &src->q, _state, make_automatic); ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic); ae_vector_init_copy(&dst->r, &src->r, _state, make_automatic); ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->activeset, &src->activeset, _state, make_automatic); ae_matrix_init_copy(&dst->tq2dense, &src->tq2dense, _state, make_automatic); ae_matrix_init_copy(&dst->tk2, &src->tk2, _state, make_automatic); ae_vector_init_copy(&dst->tq2diag, &src->tq2diag, _state, make_automatic); ae_vector_init_copy(&dst->tq1, &src->tq1, _state, make_automatic); ae_vector_init_copy(&dst->tk1, &src->tk1, _state, make_automatic); dst->tq0 = src->tq0; dst->tk0 = src->tk0; ae_vector_init_copy(&dst->txc, &src->txc, _state, make_automatic); ae_vector_init_copy(&dst->tb, &src->tb, _state, make_automatic); dst->nfree = src->nfree; dst->ecakind = src->ecakind; ae_matrix_init_copy(&dst->ecadense, &src->ecadense, _state, make_automatic); ae_matrix_init_copy(&dst->eq, &src->eq, _state, make_automatic); ae_matrix_init_copy(&dst->eccm, &src->eccm, _state, make_automatic); ae_vector_init_copy(&dst->ecadiag, &src->ecadiag, _state, make_automatic); ae_vector_init_copy(&dst->eb, &src->eb, _state, 
make_automatic); dst->ec = src->ec; ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmpg, &src->tmpg, _state, make_automatic); ae_matrix_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); dst->ismaintermchanged = src->ismaintermchanged; dst->issecondarytermchanged = src->issecondarytermchanged; dst->islineartermchanged = src->islineartermchanged; dst->isactivesetchanged = src->isactivesetchanged; } void _convexquadraticmodel_clear(void* _p) { convexquadraticmodel *p = (convexquadraticmodel*)_p; ae_touch_ptr((void*)p); ae_matrix_clear(&p->a); ae_matrix_clear(&p->q); ae_vector_clear(&p->b); ae_vector_clear(&p->r); ae_vector_clear(&p->xc); ae_vector_clear(&p->d); ae_vector_clear(&p->activeset); ae_matrix_clear(&p->tq2dense); ae_matrix_clear(&p->tk2); ae_vector_clear(&p->tq2diag); ae_vector_clear(&p->tq1); ae_vector_clear(&p->tk1); ae_vector_clear(&p->txc); ae_vector_clear(&p->tb); ae_matrix_clear(&p->ecadense); ae_matrix_clear(&p->eq); ae_matrix_clear(&p->eccm); ae_vector_clear(&p->ecadiag); ae_vector_clear(&p->eb); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmpg); ae_matrix_clear(&p->tmp2); } void _convexquadraticmodel_destroy(void* _p) { convexquadraticmodel *p = (convexquadraticmodel*)_p; ae_touch_ptr((void*)p); ae_matrix_destroy(&p->a); ae_matrix_destroy(&p->q); ae_vector_destroy(&p->b); ae_vector_destroy(&p->r); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->d); ae_vector_destroy(&p->activeset); ae_matrix_destroy(&p->tq2dense); ae_matrix_destroy(&p->tk2); ae_vector_destroy(&p->tq2diag); ae_vector_destroy(&p->tq1); ae_vector_destroy(&p->tk1); ae_vector_destroy(&p->txc); ae_vector_destroy(&p->tb); ae_matrix_destroy(&p->ecadense); ae_matrix_destroy(&p->eq); ae_matrix_destroy(&p->eccm); ae_vector_destroy(&p->ecadiag); ae_vector_destroy(&p->eb); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmpg); ae_matrix_destroy(&p->tmp2); } #endif #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This subroutine initializes "internal" OptGuard report, i.e. one intended for internal use by optimizers. -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void optguardinitinternal(optguardreport* rep, ae_int_t n, ae_int_t k, ae_state *_state) { rep->nonc0suspected = ae_false; rep->nonc0test0positive = ae_false; rep->nonc0lipschitzc = (double)(0); rep->nonc0fidx = -1; rep->nonc1suspected = ae_false; rep->nonc1test0positive = ae_false; rep->nonc1test1positive = ae_false; rep->nonc1lipschitzc = (double)(0); rep->nonc1fidx = -1; rep->badgradsuspected = ae_false; rep->badgradfidx = -1; rep->badgradvidx = -1; } /************************************************************************* This subroutine exports report to user-readable representation (all arrays are forced to have exactly same size as needed; unused arrays are set to zero length). 
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void optguardexportreport(optguardreport* srcrep, ae_int_t n, ae_int_t k, ae_bool badgradhasxj, optguardreport* dstrep, ae_state *_state) { ae_int_t i; ae_int_t j; dstrep->nonc0suspected = srcrep->nonc0suspected; dstrep->nonc0test0positive = srcrep->nonc0test0positive; if( srcrep->nonc0suspected ) { dstrep->nonc0lipschitzc = srcrep->nonc0lipschitzc; dstrep->nonc0fidx = srcrep->nonc0fidx; } else { dstrep->nonc0lipschitzc = (double)(0); dstrep->nonc0fidx = -1; } dstrep->nonc1suspected = srcrep->nonc1suspected; dstrep->nonc1test0positive = srcrep->nonc1test0positive; dstrep->nonc1test1positive = srcrep->nonc1test1positive; if( srcrep->nonc1suspected ) { dstrep->nonc1lipschitzc = srcrep->nonc1lipschitzc; dstrep->nonc1fidx = srcrep->nonc1fidx; } else { dstrep->nonc1lipschitzc = (double)(0); dstrep->nonc1fidx = -1; } dstrep->badgradsuspected = srcrep->badgradsuspected; if( srcrep->badgradsuspected ) { dstrep->badgradfidx = srcrep->badgradfidx; dstrep->badgradvidx = srcrep->badgradvidx; } else { dstrep->badgradfidx = -1; dstrep->badgradvidx = -1; } if( badgradhasxj ) { ae_vector_set_length(&dstrep->badgradxbase, n, _state); for(j=0; j<=n-1; j++) { dstrep->badgradxbase.ptr.p_double[j] = srcrep->badgradxbase.ptr.p_double[j]; } ae_matrix_set_length(&dstrep->badgraduser, k, n, _state); ae_matrix_set_length(&dstrep->badgradnum, k, n, _state); for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { dstrep->badgraduser.ptr.pp_double[i][j] = srcrep->badgraduser.ptr.pp_double[i][j]; dstrep->badgradnum.ptr.pp_double[i][j] = srcrep->badgradnum.ptr.pp_double[i][j]; } } } else { ae_vector_set_length(&dstrep->badgradxbase, 0, _state); ae_matrix_set_length(&dstrep->badgraduser, 0, 0, _state); ae_matrix_set_length(&dstrep->badgradnum, 0, 0, _state); } } /************************************************************************* This subroutine exports report to user-readable representation (all arrays are forced to have exactly same size as needed; unused arrays are set to zero length). NOTE: we assume that SrcRep contains scaled X0[] and D[], i.e. explicit variable scaling was applied. We need to rescale them during export, that's why we need S[] parameter. 
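For reference, the unscaling performed below is X0[i] := X0[i]*S[i] and D[i] := D[i]*S[i]; the step grid Stp[] and the function values F[] are copied without modification.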
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorexportc1test0report(optguardnonc1test0report* srcrep, /* Real */ ae_vector* s, optguardnonc1test0report* dstrep, ae_state *_state) { ae_int_t i; dstrep->positive = srcrep->positive; if( srcrep->positive ) { dstrep->stpidxa = srcrep->stpidxa; dstrep->stpidxb = srcrep->stpidxb; dstrep->fidx = srcrep->fidx; dstrep->cnt = srcrep->cnt; dstrep->n = srcrep->n; ae_vector_set_length(&dstrep->x0, srcrep->n, _state); ae_vector_set_length(&dstrep->d, srcrep->n, _state); for(i=0; i<=srcrep->n-1; i++) { dstrep->x0.ptr.p_double[i] = srcrep->x0.ptr.p_double[i]*s->ptr.p_double[i]; dstrep->d.ptr.p_double[i] = srcrep->d.ptr.p_double[i]*s->ptr.p_double[i]; } ae_vector_set_length(&dstrep->stp, srcrep->cnt, _state); ae_vector_set_length(&dstrep->f, srcrep->cnt, _state); for(i=0; i<=srcrep->cnt-1; i++) { dstrep->stp.ptr.p_double[i] = srcrep->stp.ptr.p_double[i]; dstrep->f.ptr.p_double[i] = srcrep->f.ptr.p_double[i]; } } else { dstrep->stpidxa = -1; dstrep->stpidxb = -1; dstrep->fidx = -1; dstrep->cnt = 0; dstrep->n = 0; ae_vector_set_length(&dstrep->x0, 0, _state); ae_vector_set_length(&dstrep->d, 0, _state); ae_vector_set_length(&dstrep->stp, 0, _state); ae_vector_set_length(&dstrep->f, 0, _state); } } /************************************************************************* This subroutine exports report to user-readable representation (all arrays are forced to have exactly same size as needed; unused arrays are set to zero length). NOTE: we assume that SrcRep contains scaled X0[], D[] and G[], i.e. explicit variable scaling was applied. We need to rescale them during export, that's why we need S[] parameter. -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorexportc1test1report(optguardnonc1test1report* srcrep, /* Real */ ae_vector* s, optguardnonc1test1report* dstrep, ae_state *_state) { ae_int_t i; dstrep->positive = srcrep->positive; if( srcrep->positive ) { ae_assert(srcrep->vidx>=0&&srcrep->vidx<srcrep->n, "SmoothnessMonitorExportC1Test1Report: integrity check failed", _state); dstrep->stpidxa = srcrep->stpidxa; dstrep->stpidxb = srcrep->stpidxb; dstrep->fidx = srcrep->fidx; dstrep->vidx = srcrep->vidx; dstrep->cnt = srcrep->cnt; dstrep->n = srcrep->n; ae_vector_set_length(&dstrep->x0, srcrep->n, _state); ae_vector_set_length(&dstrep->d, srcrep->n, _state); for(i=0; i<=srcrep->n-1; i++) { dstrep->x0.ptr.p_double[i] = srcrep->x0.ptr.p_double[i]*s->ptr.p_double[i]; dstrep->d.ptr.p_double[i] = srcrep->d.ptr.p_double[i]*s->ptr.p_double[i]; } ae_vector_set_length(&dstrep->stp, srcrep->cnt, _state); ae_vector_set_length(&dstrep->g, srcrep->cnt, _state); for(i=0; i<=srcrep->cnt-1; i++) { dstrep->stp.ptr.p_double[i] = srcrep->stp.ptr.p_double[i]; dstrep->g.ptr.p_double[i] = srcrep->g.ptr.p_double[i]/s->ptr.p_double[srcrep->vidx]; } } else { dstrep->stpidxa = -1; dstrep->stpidxb = -1; dstrep->fidx = -1; dstrep->vidx = -1; dstrep->cnt = 0; dstrep->n = 0; ae_vector_set_length(&dstrep->x0, 0, _state); ae_vector_set_length(&dstrep->d, 0, _state); ae_vector_set_length(&dstrep->stp, 0, _state); ae_vector_set_length(&dstrep->g, 0, _state); } } /************************************************************************* Returns True when all flags are clear. Intended for easy coding of unit tests.
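A typical test-side usage sketch (the report variable name "ogrep" is illustrative): ae_assert(optguardallclear(&ogrep, _state), "test: unexpected OptGuard flags", _state);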
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ ae_bool optguardallclear(optguardreport* rep, ae_state *_state) { ae_bool result; result = !((rep->badgradsuspected||rep->nonc0suspected)||rep->nonc1suspected); return result; } void _optguardreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { optguardreport *p = (optguardreport*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->badgradxbase, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->badgraduser, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->badgradnum, 0, 0, DT_REAL, _state, make_automatic); } void _optguardreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { optguardreport *dst = (optguardreport*)_dst; optguardreport *src = (optguardreport*)_src; dst->nonc0suspected = src->nonc0suspected; dst->nonc0test0positive = src->nonc0test0positive; dst->nonc0fidx = src->nonc0fidx; dst->nonc0lipschitzc = src->nonc0lipschitzc; dst->nonc1suspected = src->nonc1suspected; dst->nonc1test0positive = src->nonc1test0positive; dst->nonc1test1positive = src->nonc1test1positive; dst->nonc1fidx = src->nonc1fidx; dst->nonc1lipschitzc = src->nonc1lipschitzc; dst->badgradsuspected = src->badgradsuspected; dst->badgradfidx = src->badgradfidx; dst->badgradvidx = src->badgradvidx; ae_vector_init_copy(&dst->badgradxbase, &src->badgradxbase, _state, make_automatic); ae_matrix_init_copy(&dst->badgraduser, &src->badgraduser, _state, make_automatic); ae_matrix_init_copy(&dst->badgradnum, &src->badgradnum, _state, make_automatic); } void _optguardreport_clear(void* _p) { optguardreport *p = (optguardreport*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->badgradxbase); ae_matrix_clear(&p->badgraduser); ae_matrix_clear(&p->badgradnum); } void _optguardreport_destroy(void* _p) { optguardreport *p = (optguardreport*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->badgradxbase); ae_matrix_destroy(&p->badgraduser); ae_matrix_destroy(&p->badgradnum); } void _optguardnonc0report_init(void* _p, ae_state *_state, ae_bool make_automatic) { optguardnonc0report *p = (optguardnonc0report*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->x0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->f, 0, DT_REAL, _state, make_automatic); } void _optguardnonc0report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { optguardnonc0report *dst = (optguardnonc0report*)_dst; optguardnonc0report *src = (optguardnonc0report*)_src; dst->positive = src->positive; dst->fidx = src->fidx; ae_vector_init_copy(&dst->x0, &src->x0, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->n = src->n; ae_vector_init_copy(&dst->stp, &src->stp, _state, make_automatic); ae_vector_init_copy(&dst->f, &src->f, _state, make_automatic); dst->cnt = src->cnt; dst->stpidxa = src->stpidxa; dst->stpidxb = src->stpidxb; } void _optguardnonc0report_clear(void* _p) { optguardnonc0report *p = (optguardnonc0report*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->x0); ae_vector_clear(&p->d); ae_vector_clear(&p->stp); ae_vector_clear(&p->f); } void _optguardnonc0report_destroy(void* _p) { optguardnonc0report *p = (optguardnonc0report*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->x0); ae_vector_destroy(&p->d); ae_vector_destroy(&p->stp); ae_vector_destroy(&p->f); } void 
_optguardnonc1test0report_init(void* _p, ae_state *_state, ae_bool make_automatic) { optguardnonc1test0report *p = (optguardnonc1test0report*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->x0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->f, 0, DT_REAL, _state, make_automatic); } void _optguardnonc1test0report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { optguardnonc1test0report *dst = (optguardnonc1test0report*)_dst; optguardnonc1test0report *src = (optguardnonc1test0report*)_src; dst->positive = src->positive; dst->fidx = src->fidx; ae_vector_init_copy(&dst->x0, &src->x0, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->n = src->n; ae_vector_init_copy(&dst->stp, &src->stp, _state, make_automatic); ae_vector_init_copy(&dst->f, &src->f, _state, make_automatic); dst->cnt = src->cnt; dst->stpidxa = src->stpidxa; dst->stpidxb = src->stpidxb; } void _optguardnonc1test0report_clear(void* _p) { optguardnonc1test0report *p = (optguardnonc1test0report*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->x0); ae_vector_clear(&p->d); ae_vector_clear(&p->stp); ae_vector_clear(&p->f); } void _optguardnonc1test0report_destroy(void* _p) { optguardnonc1test0report *p = (optguardnonc1test0report*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->x0); ae_vector_destroy(&p->d); ae_vector_destroy(&p->stp); ae_vector_destroy(&p->f); } void _optguardnonc1test1report_init(void* _p, ae_state *_state, ae_bool make_automatic) { optguardnonc1test1report *p = (optguardnonc1test1report*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->x0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); } void _optguardnonc1test1report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { optguardnonc1test1report *dst = (optguardnonc1test1report*)_dst; optguardnonc1test1report *src = (optguardnonc1test1report*)_src; dst->positive = src->positive; dst->fidx = src->fidx; dst->vidx = src->vidx; ae_vector_init_copy(&dst->x0, &src->x0, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->n = src->n; ae_vector_init_copy(&dst->stp, &src->stp, _state, make_automatic); ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->cnt = src->cnt; dst->stpidxa = src->stpidxa; dst->stpidxb = src->stpidxb; } void _optguardnonc1test1report_clear(void* _p) { optguardnonc1test1report *p = (optguardnonc1test1report*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->x0); ae_vector_clear(&p->d); ae_vector_clear(&p->stp); ae_vector_clear(&p->g); } void _optguardnonc1test1report_destroy(void* _p) { optguardnonc1test1report *p = (optguardnonc1test1report*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->x0); ae_vector_destroy(&p->d); ae_vector_destroy(&p->stp); ae_vector_destroy(&p->g); } #endif #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This subroutine checks violation of the box constraints. On output it sets bcerr to the maximum scaled violation, bcidx to the index of the violating constraint. if bcerr=0 (say, if no constraints are violated) then bcidx=-1. If nonunits=false then s[] is not referenced at all (assumed unit). 
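For example, with NonUnits=True, S[i]=10, BndL[i]=1 and X[i]=0.5, the scaled violation of the lower bound is (1-0.5)/10 = 0.05.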
-- ALGLIB -- Copyright 7.11.2018 by Bochkanov Sergey *************************************************************************/ void checkbcviolation(/* Boolean */ ae_vector* hasbndl, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* bndu, /* Real */ ae_vector* x, ae_int_t n, /* Real */ ae_vector* s, ae_bool nonunits, double* bcerr, ae_int_t* bcidx, ae_state *_state) { ae_int_t i; double vs; double ve; *bcerr = 0; *bcidx = 0; *bcerr = (double)(0); *bcidx = -1; for(i=0; i<=n-1; i++) { /* * Fetch scale */ if( nonunits ) { vs = 1/s->ptr.p_double[i]; } else { vs = (double)(1); } /* * Check lower bound */ if( hasbndl->ptr.p_bool[i]&&x->ptr.p_double[i]<bndl->ptr.p_double[i] ) { ve = (bndl->ptr.p_double[i]-x->ptr.p_double[i])*vs; if( ve>*bcerr ) { *bcerr = ve; *bcidx = i; } } /* * Check upper bound */ if( hasbndu->ptr.p_bool[i]&&x->ptr.p_double[i]>bndu->ptr.p_double[i] ) { ve = (x->ptr.p_double[i]-bndu->ptr.p_double[i])*vs; if( ve>*bcerr ) { *bcerr = ve; *bcidx = i; } } } } /************************************************************************* This subroutine checks violation of the general linear constraints. Constraints are assumed to be un-normalized and stored in the format "NEC equality ones followed by NIC inequality ones". On output it sets lcerr to the maximum scaled violation, lcidx to the source index of the most violating constraint (row indexes of CLEIC are mapped to the indexes of the "original" constraints via LCSrcIdx[] array). if lcerr=0 (say, if no constraints are violated) then lcidx=-1. If nonunits=false then s[] is not referenced at all (assumed unit). -- ALGLIB -- Copyright 7.11.2018 by Bochkanov Sergey *************************************************************************/ void checklcviolation(/* Real */ ae_matrix* cleic, /* Integer */ ae_vector* lcsrcidx, ae_int_t nec, ae_int_t nic, /* Real */ ae_vector* x, ae_int_t n, double* lcerr, ae_int_t* lcidx, ae_state *_state) { ae_int_t i; ae_int_t j; double cx; double cnrm; double v; *lcerr = 0; *lcidx = 0; *lcerr = (double)(0); *lcidx = -1; for(i=0; i<=nec+nic-1; i++) { cx = -cleic->ptr.pp_double[i][n]; cnrm = (double)(0); for(j=0; j<=n-1; j++) { v = cleic->ptr.pp_double[i][j]; cx = cx+v*x->ptr.p_double[j]; cnrm = cnrm+v*v; } cnrm = ae_sqrt(cnrm, _state); cx = cx/coalesce(cnrm, (double)(1), _state); if( i<nec ) { cx = ae_fabs(cx, _state); } else { cx = ae_maxreal(cx, (double)(0), _state); } if( cx>*lcerr ) { *lcerr = cx; *lcidx = lcsrcidx->ptr.p_int[i]; } } } /************************************************************************* This subroutine checks violation of the nonlinear constraints. Fi[0] is the target value (ignored), Fi[1:NG+NH] are values of nonlinear constraints. On output it sets nlcerr to the scaled violation, nlcidx to the index of the most violating constraint in [0,NG+NH-1] range. if nlcerr=0 (say, if no constraints are violated) then nlcidx=-1. If nonunits=false then s[] is not referenced at all (assumed unit).
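Here the first NG components are treated as equality constraints whose violation is measured by the absolute value, while the remaining NH components are inequality constraints measured by their positive part (see the code below).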
-- ALGLIB -- Copyright 7.11.2018 by Bochkanov Sergey *************************************************************************/ void checknlcviolation(/* Real */ ae_vector* fi, ae_int_t ng, ae_int_t nh, double* nlcerr, ae_int_t* nlcidx, ae_state *_state) { ae_int_t i; double v; *nlcerr = 0; *nlcidx = 0; *nlcerr = (double)(0); *nlcidx = -1; for(i=0; i<=ng+nh-1; i++) { v = fi->ptr.p_double[i+1]; if( i<ng ) { v = ae_fabs(v, _state); } else { v = ae_maxreal(v, (double)(0), _state); } if( v>*nlcerr ) { *nlcerr = v; *nlcidx = i; } } } /************************************************************************* This subroutine is same as CheckNLCViolation, but it works with scaled constraints: it assumes that Fi[] were divided by FScales[] vector BEFORE passing them to this function. The function checks scaled values, but reports unscaled errors. -- ALGLIB -- Copyright 7.11.2018 by Bochkanov Sergey *************************************************************************/ void unscaleandchecknlcviolation(/* Real */ ae_vector* fi, /* Real */ ae_vector* fscales, ae_int_t ng, ae_int_t nh, double* nlcerr, ae_int_t* nlcidx, ae_state *_state) { ae_int_t i; double v; *nlcerr = 0; *nlcidx = 0; *nlcerr = (double)(0); *nlcidx = -1; for(i=0; i<=ng+nh-1; i++) { ae_assert(ae_fp_greater(fscales->ptr.p_double[i+1],(double)(0)), "UnscaleAndCheckNLCViolation: integrity check failed", _state); v = fi->ptr.p_double[i+1]*fscales->ptr.p_double[i+1]; if( i<ng ) { v = ae_fabs(v, _state); } else { v = ae_maxreal(v, (double)(0), _state); } if( v>*nlcerr ) { *nlcerr = v; *nlcidx = i; } } } /************************************************************************* This subroutine is used to prepare threshold value which will be used for trimming of the target function (see comments on TrimFunction() for more information). This function accepts only one parameter: function value at the starting point. It returns threshold which will be used for trimming. -- ALGLIB -- Copyright 10.05.2011 by Bochkanov Sergey *************************************************************************/ void trimprepare(double f, double* threshold, ae_state *_state) { *threshold = 0; *threshold = 10*(ae_fabs(f, _state)+1); } /************************************************************************* This subroutine is used to "trim" target function, i.e. to do the following transformation: {F,G} = { {F,G} if F<Threshold ; {Threshold, 0} if F>=Threshold }. Such transformation allows us to solve problems with singularities by redefining function in such way that it becomes bounded from above. -- ALGLIB -- Copyright 10.05.2011 by Bochkanov Sergey *************************************************************************/ void trimfunction(double* f, /* Real */ ae_vector* g, ae_int_t n, double threshold, ae_state *_state) { ae_int_t i; if( ae_fp_greater_eq(*f,threshold) ) { *f = threshold; for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = 0.0; } } } /************************************************************************* This function enforces boundary constraints in the X. This function correctly (although a bit inefficiently) handles BL[i] which are -INF and BU[i] which are +INF. We have NMain+NSlack dimensional X, with first NMain components bounded by BL/BU, and next NSlack ones bounded by non-negativity constraints.
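For instance, with NMain=2 and NSlack=1 the point has the layout X = (x0, x1, s0), where x0 and x1 obey BL/BU and the slack s0 must satisfy s0>=0.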
INPUT PARAMETERS X - array[NMain+NSlack], point BL - array[NMain], lower bounds (may contain -INF, when bound is not present) HaveBL - array[NMain], if HaveBL[i] is False, then i-th bound is not present BU - array[NMain], upper bounds (may contain +INF, when bound is not present) HaveBU - array[NMain], if HaveBU[i] is False, then i-th bound is not present OUTPUT PARAMETERS X - X with all constraints being enforced It returns True when constraints are consistent, False - when constraints are inconsistent. -- ALGLIB -- Copyright 10.01.2012 by Bochkanov Sergey *************************************************************************/ ae_bool enforceboundaryconstraints(/* Real */ ae_vector* x, /* Real */ ae_vector* bl, /* Boolean */ ae_vector* havebl, /* Real */ ae_vector* bu, /* Boolean */ ae_vector* havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state) { ae_int_t i; ae_bool result; result = ae_false; for(i=0; i<=nmain-1; i++) { if( (havebl->ptr.p_bool[i]&&havebu->ptr.p_bool[i])&&ae_fp_greater(bl->ptr.p_double[i],bu->ptr.p_double[i]) ) { return result; } if( havebl->ptr.p_bool[i]&&ae_fp_less(x->ptr.p_double[i],bl->ptr.p_double[i]) ) { x->ptr.p_double[i] = bl->ptr.p_double[i]; } if( havebu->ptr.p_bool[i]&&ae_fp_greater(x->ptr.p_double[i],bu->ptr.p_double[i]) ) { x->ptr.p_double[i] = bu->ptr.p_double[i]; } } for(i=0; i<=nslack-1; i++) { if( ae_fp_less(x->ptr.p_double[nmain+i],(double)(0)) ) { x->ptr.p_double[nmain+i] = (double)(0); } } result = ae_true; return result; } /************************************************************************* This function projects gradient into feasible area of boundary constrained optimization problem. X can be infeasible with respect to boundary constraints. We have NMain+NSlack dimensional X, with first NMain components bounded by BL/BU, and next NSlack ones bounded by non-negativity constraints. INPUT PARAMETERS X - array[NMain+NSlack], point G - array[NMain+NSlack], gradient BL - lower bounds (may contain -INF, when bound is not present) HaveBL - if HaveBL[i] is False, then i-th bound is not present BU - upper bounds (may contain +INF, when bound is not present) HaveBU - if HaveBU[i] is False, then i-th bound is not present OUTPUT PARAMETERS G - projection of G. Components of G which satisfy one of the following (1) (X[I]<=BndL[I]) and (G[I]>0), OR (2) (X[I]>=BndU[I]) and (G[I]<0) are replaced by zeros. NOTE 1: this function assumes that constraints are feasible. It throws exception otherwise. NOTE 2: in fact, projection of ANTI-gradient is calculated, because this function trims components of -G which points outside of the feasible area. However, working with -G is considered confusing, because all optimization source work with G. 
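For example, if X[0] sits exactly at its lower bound and G[0]>0 (so the antigradient -G[0] points outside the box), G[0] is replaced by zero; components whose antigradient points inside the feasible area are left intact.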
-- ALGLIB -- Copyright 10.01.2012 by Bochkanov Sergey *************************************************************************/ void projectgradientintobc(/* Real */ ae_vector* x, /* Real */ ae_vector* g, /* Real */ ae_vector* bl, /* Boolean */ ae_vector* havebl, /* Real */ ae_vector* bu, /* Boolean */ ae_vector* havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state) { ae_int_t i; for(i=0; i<=nmain-1; i++) { ae_assert((!havebl->ptr.p_bool[i]||!havebu->ptr.p_bool[i])||ae_fp_less_eq(bl->ptr.p_double[i],bu->ptr.p_double[i]), "ProjectGradientIntoBC: internal error (infeasible constraints)", _state); if( (havebl->ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],bl->ptr.p_double[i]))&&ae_fp_greater(g->ptr.p_double[i],(double)(0)) ) { g->ptr.p_double[i] = (double)(0); } if( (havebu->ptr.p_bool[i]&&ae_fp_greater_eq(x->ptr.p_double[i],bu->ptr.p_double[i]))&&ae_fp_less(g->ptr.p_double[i],(double)(0)) ) { g->ptr.p_double[i] = (double)(0); } } for(i=0; i<=nslack-1; i++) { if( ae_fp_less_eq(x->ptr.p_double[nmain+i],(double)(0))&&ae_fp_greater(g->ptr.p_double[nmain+i],(double)(0)) ) { g->ptr.p_double[nmain+i] = (double)(0); } } } /************************************************************************* Given a) initial point X0[NMain+NSlack] (feasible with respect to bound constraints) b) step vector alpha*D[NMain+NSlack] c) boundary constraints BndL[NMain], BndU[NMain] d) implicit non-negativity constraints for slack variables this function calculates bound on the step length subject to boundary constraints. It returns: * MaxStepLen - such step length that X0+MaxStepLen*alpha*D is exactly at the boundary given by constraints * VariableToFreeze - index of the constraint to be activated, 0 <= VariableToFreeze < NMain+NSlack * ValueToFreeze - value of the corresponding constraint. Notes: * it is possible that several constraints can be activated by the step at once. In such cases only one constraint is returned. It is caller responsibility to check other constraints. This function makes sure that we activate at least one constraint, and everything else is the responsibility of the caller. * steps smaller than MaxStepLen still can activate constraints due to numerical errors. Thus purpose of this function is not to guard against accidental activation of the constraints - quite the reverse, its purpose is to activate at least constraint upon performing step which is too long. * in case there is no constraints to activate, we return negative VariableToFreeze and zero MaxStepLen and ValueToFreeze. * this function assumes that constraints are consistent; it throws exception otherwise. INPUT PARAMETERS X - array[NMain+NSlack], point. Must be feasible with respect to bound constraints (exception will be thrown otherwise) D - array[NMain+NSlack], step direction alpha - scalar multiplier before D, alpha<>0 BndL - lower bounds, array[NMain] (may contain -INF, when bound is not present) HaveBndL - array[NMain], if HaveBndL[i] is False, then i-th bound is not present BndU - array[NMain], upper bounds (may contain +INF, when bound is not present) HaveBndU - array[NMain], if HaveBndU[i] is False, then i-th bound is not present NMain - number of main variables NSlack - number of slack variables OUTPUT PARAMETERS VariableToFreeze: * negative value = step is unbounded, ValueToFreeze=0, MaxStepLen=0. * non-negative value = at least one constraint, given by this parameter, will be activated upon performing maximum step. 
ValueToFreeze- value of the variable which will be constrained MaxStepLen - maximum length of the step. Can be zero when step vector looks outside of the feasible area. -- ALGLIB -- Copyright 10.01.2012 by Bochkanov Sergey *************************************************************************/ void calculatestepbound(/* Real */ ae_vector* x, /* Real */ ae_vector* d, double alpha, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* havebndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t* variabletofreeze, double* valuetofreeze, double* maxsteplen, ae_state *_state) { ae_int_t i; double prevmax; double initval; *variabletofreeze = 0; *valuetofreeze = 0; *maxsteplen = 0; ae_assert(ae_fp_neq(alpha,(double)(0)), "CalculateStepBound: zero alpha", _state); *variabletofreeze = -1; initval = ae_maxrealnumber; *maxsteplen = initval; for(i=0; i<=nmain-1; i++) { if( havebndl->ptr.p_bool[i]&&ae_fp_less(alpha*d->ptr.p_double[i],(double)(0)) ) { ae_assert(ae_fp_greater_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]), "CalculateStepBound: infeasible X", _state); prevmax = *maxsteplen; *maxsteplen = safeminposrv(x->ptr.p_double[i]-bndl->ptr.p_double[i], -alpha*d->ptr.p_double[i], *maxsteplen, _state); if( ae_fp_less(*maxsteplen,prevmax) ) { *variabletofreeze = i; *valuetofreeze = bndl->ptr.p_double[i]; } } if( havebndu->ptr.p_bool[i]&&ae_fp_greater(alpha*d->ptr.p_double[i],(double)(0)) ) { ae_assert(ae_fp_less_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]), "CalculateStepBound: infeasible X", _state); prevmax = *maxsteplen; *maxsteplen = safeminposrv(bndu->ptr.p_double[i]-x->ptr.p_double[i], alpha*d->ptr.p_double[i], *maxsteplen, _state); if( ae_fp_less(*maxsteplen,prevmax) ) { *variabletofreeze = i; *valuetofreeze = bndu->ptr.p_double[i]; } } } for(i=0; i<=nslack-1; i++) { if( ae_fp_less(alpha*d->ptr.p_double[nmain+i],(double)(0)) ) { ae_assert(ae_fp_greater_eq(x->ptr.p_double[nmain+i],(double)(0)), "CalculateStepBound: infeasible X", _state); prevmax = *maxsteplen; *maxsteplen = safeminposrv(x->ptr.p_double[nmain+i], -alpha*d->ptr.p_double[nmain+i], *maxsteplen, _state); if( ae_fp_less(*maxsteplen,prevmax) ) { *variabletofreeze = nmain+i; *valuetofreeze = (double)(0); } } } if( ae_fp_eq(*maxsteplen,initval) ) { *valuetofreeze = (double)(0); *maxsteplen = (double)(0); } } /************************************************************************* This function postprocesses bounded step by: * analysing step length (whether it is equal to MaxStepLen) and activating constraint given by VariableToFreeze if needed * checking for additional bound constraints to activate This function uses final point of the step, quantities calculated by the CalculateStepBound() function. As result, it returns point which is exactly feasible with respect to boundary constraints. 
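For example, if CalculateStepBound() returned MaxStepLen=0.7 with VariableToFreeze=3, and the caller performed the full step StepTaken=0.7, then X[3] is set exactly to ValueToFreeze by this function.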
NOTE 1: this function does NOT handle and check linear equality constraints NOTE 2: when StepTaken=MaxStepLen we always activate at least one constraint INPUT PARAMETERS X - array[NMain+NSlack], final point to postprocess XPrev - array[NMain+NSlack], initial point BndL - lower bounds, array[NMain] (may contain -INF, when bound is not present) HaveBndL - array[NMain], if HaveBndL[i] is False, then i-th bound is not present BndU - array[NMain], upper bounds (may contain +INF, when bound is not present) HaveBndU - array[NMain], if HaveBndU[i] is False, then i-th bound is not present NMain - number of main variables NSlack - number of slack variables VariableToFreeze-result of CalculateStepBound() ValueToFreeze- result of CalculateStepBound() StepTaken - actual step length (actual step is equal to the possibly non-unit step direction vector times this parameter). StepTaken<=MaxStepLen. MaxStepLen - result of CalculateStepBound() OUTPUT PARAMETERS X - point bounded with respect to constraints. components corresponding to active constraints are exactly equal to the boundary values. RESULT: number of constraints activated in addition to previously active ones. Constraints which were DEACTIVATED are ignored (do not influence function value). -- ALGLIB -- Copyright 10.01.2012 by Bochkanov Sergey *************************************************************************/ ae_int_t postprocessboundedstep(/* Real */ ae_vector* x, /* Real */ ae_vector* xprev, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* havebndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t variabletofreeze, double valuetofreeze, double steptaken, double maxsteplen, ae_state *_state) { ae_int_t i; ae_bool wasactivated; ae_int_t result; ae_assert(variabletofreeze<0||ae_fp_less_eq(steptaken,maxsteplen), "Assertion failed", _state); /* * Activate constraints */ if( variabletofreeze>=0&&ae_fp_eq(steptaken,maxsteplen) ) { x->ptr.p_double[variabletofreeze] = valuetofreeze; } for(i=0; i<=nmain-1; i++) { if( havebndl->ptr.p_bool[i]&&ae_fp_less(x->ptr.p_double[i],bndl->ptr.p_double[i]) ) { x->ptr.p_double[i] = bndl->ptr.p_double[i]; } if( havebndu->ptr.p_bool[i]&&ae_fp_greater(x->ptr.p_double[i],bndu->ptr.p_double[i]) ) { x->ptr.p_double[i] = bndu->ptr.p_double[i]; } } for(i=0; i<=nslack-1; i++) { if( ae_fp_less_eq(x->ptr.p_double[nmain+i],(double)(0)) ) { x->ptr.p_double[nmain+i] = (double)(0); } } /* * Calculate number of constraints being activated */ result = 0; for(i=0; i<=nmain-1; i++) { wasactivated = ae_fp_neq(x->ptr.p_double[i],xprev->ptr.p_double[i])&&((havebndl->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]))||(havebndu->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]))); wasactivated = wasactivated||variabletofreeze==i; if( wasactivated ) { result = result+1; } } for(i=0; i<=nslack-1; i++) { wasactivated = ae_fp_neq(x->ptr.p_double[nmain+i],xprev->ptr.p_double[nmain+i])&&ae_fp_eq(x->ptr.p_double[nmain+i],0.0); wasactivated = wasactivated||variabletofreeze==nmain+i; if( wasactivated ) { result = result+1; } } return result; } /************************************************************************* The purpose of this function is to prevent algorithm from "unsticking" from the active bound constraints because of numerical noise in the gradient or Hessian. It is done by zeroing some components of the search direction D. 
D[i] is zeroed when both (a) and (b) are true:
a) corresponding X[i] is exactly at the boundary
b) |D[i]*S[i]| <= DropTol*Sqrt(SUM(D[i]^2*S[i]^2))

D can be a step direction, antigradient, gradient, or anything similar.
Neither the sign of D nor the step length matters.

NOTE 1: boundary constraints are expected to be consistent, and X is
        expected to be feasible. An exception will be thrown otherwise.

INPUT PARAMETERS
    D       -   array[NMain+NSlack], direction
    X       -   array[NMain+NSlack], current point
    BndL    -   lower bounds, array[NMain]
                (may contain -INF, when bound is not present)
    HaveBndL-   array[NMain], if HaveBndL[i] is False,
                then i-th bound is not present
    BndU    -   array[NMain], upper bounds
                (may contain +INF, when bound is not present)
    HaveBndU-   array[NMain], if HaveBndU[i] is False,
                then i-th bound is not present
    S       -   array[NMain+NSlack], scaling of the variables
    NMain   -   number of main variables
    NSlack  -   number of slack variables
    DropTol -   drop tolerance, >=0

OUTPUT PARAMETERS
    D       -   direction with some components zeroed: components which
                correspond to active bound constraints and whose scaled
                magnitude is below the drop tolerance are set to zero.

  -- ALGLIB --
     Copyright 10.01.2012 by Bochkanov Sergey
*************************************************************************/
void filterdirection(/* Real    */ ae_vector* d,
     /* Real    */ ae_vector* x,
     /* Real    */ ae_vector* bndl,
     /* Boolean */ ae_vector* havebndl,
     /* Real    */ ae_vector* bndu,
     /* Boolean */ ae_vector* havebndu,
     /* Real    */ ae_vector* s,
     ae_int_t nmain,
     ae_int_t nslack,
     double droptol,
     ae_state *_state)
{
    ae_int_t i;
    double scalednorm;
    ae_bool isactive;

    scalednorm = 0.0;
    for(i=0; i<=nmain+nslack-1; i++)
    {
        scalednorm = scalednorm+ae_sqr(d->ptr.p_double[i]*s->ptr.p_double[i], _state);
    }
    scalednorm = ae_sqrt(scalednorm, _state);
    for(i=0; i<=nmain-1; i++)
    {
        ae_assert(!havebndl->ptr.p_bool[i]||ae_fp_greater_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]), "FilterDirection: infeasible point", _state);
        ae_assert(!havebndu->ptr.p_bool[i]||ae_fp_less_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]), "FilterDirection: infeasible point", _state);
        isactive = (havebndl->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]))||(havebndu->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]));
        if( isactive&&ae_fp_less_eq(ae_fabs(d->ptr.p_double[i]*s->ptr.p_double[i], _state),droptol*scalednorm) )
        {
            d->ptr.p_double[i] = 0.0;
        }
    }
    for(i=0; i<=nslack-1; i++)
    {
        ae_assert(ae_fp_greater_eq(x->ptr.p_double[nmain+i],(double)(0)), "FilterDirection: infeasible point", _state);
        if( ae_fp_eq(x->ptr.p_double[nmain+i],(double)(0))&&ae_fp_less_eq(ae_fabs(d->ptr.p_double[nmain+i]*s->ptr.p_double[nmain+i], _state),droptol*scalednorm) )
        {
            d->ptr.p_double[nmain+i] = 0.0;
        }
    }
}


/*************************************************************************
This function returns the number of bound constraints whose state was
changed (either activated or deactivated) when making a step from XPrev
to X.

Constraints are considered:
* active   - when we are exactly at the boundary
* inactive - when we are not at the boundary

You should note that the antigradient direction is NOT taken into account
when we make decisions on the constraint status.

INPUT PARAMETERS
    X       -   array[NMain+NSlack], final point.
                Must be feasible with respect to bound constraints.
    XPrev   -   array[NMain+NSlack], initial point.
                Must be feasible with respect to bound constraints.
BndL - lower bounds, array[NMain] (may contain -INF, when bound is not present) HaveBndL - array[NMain], if HaveBndL[i] is False, then i-th bound is not present BndU - array[NMain], upper bounds (may contain +INF, when bound is not present) HaveBndU - array[NMain], if HaveBndU[i] is False, then i-th bound is not present NMain - number of main variables NSlack - number of slack variables RESULT: number of constraints whose state was changed. -- ALGLIB -- Copyright 10.01.2012 by Bochkanov Sergey *************************************************************************/ ae_int_t numberofchangedconstraints(/* Real */ ae_vector* x, /* Real */ ae_vector* xprev, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* havebndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* havebndu, ae_int_t nmain, ae_int_t nslack, ae_state *_state) { ae_int_t i; ae_bool statuschanged; ae_int_t result; result = 0; for(i=0; i<=nmain-1; i++) { if( ae_fp_neq(x->ptr.p_double[i],xprev->ptr.p_double[i]) ) { statuschanged = ae_false; if( havebndl->ptr.p_bool[i]&&(ae_fp_eq(x->ptr.p_double[i],bndl->ptr.p_double[i])||ae_fp_eq(xprev->ptr.p_double[i],bndl->ptr.p_double[i])) ) { statuschanged = ae_true; } if( havebndu->ptr.p_bool[i]&&(ae_fp_eq(x->ptr.p_double[i],bndu->ptr.p_double[i])||ae_fp_eq(xprev->ptr.p_double[i],bndu->ptr.p_double[i])) ) { statuschanged = ae_true; } if( statuschanged ) { result = result+1; } } } for(i=0; i<=nslack-1; i++) { if( ae_fp_neq(x->ptr.p_double[nmain+i],xprev->ptr.p_double[nmain+i])&&(ae_fp_eq(x->ptr.p_double[nmain+i],(double)(0))||ae_fp_eq(xprev->ptr.p_double[nmain+i],(double)(0))) ) { result = result+1; } } return result; } /************************************************************************* This function finds feasible point of (NMain+NSlack)-dimensional problem subject to NMain explicit boundary constraints (some constraints can be omitted), NSlack implicit non-negativity constraints, K linear equality constraints. INPUT PARAMETERS X - array[NMain+NSlack], initial point. BndL - lower bounds, array[NMain] (may contain -INF, when bound is not present) HaveBndL - array[NMain], if HaveBndL[i] is False, then i-th bound is not present BndU - array[NMain], upper bounds (may contain +INF, when bound is not present) HaveBndU - array[NMain], if HaveBndU[i] is False, then i-th bound is not present NMain - number of main variables NSlack - number of slack variables CE - array[K,NMain+NSlack+1], equality constraints CE*x=b. Rows contain constraints, first NMain+NSlack columns contain coefficients before X[], last column contain right part. K - number of linear constraints EpsI - infeasibility (error in the right part) allowed in the solution OUTPUT PARAMETERS: X - feasible point or best infeasible point found before algorithm termination QPIts - number of QP iterations (for debug purposes) GPAIts - number of GPA iterations (for debug purposes) RESULT: True in case X is feasible, False - if it is infeasible. 
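NOTE: as an illustration of the CE layout described above (a sketch, not an
additional requirement), row I of CE encodes the constraint

    CE[I,0]*x[0] + ... + CE[I,NMain+NSlack-1]*x[NMain+NSlack-1] = CE[I,NMain+NSlack]

so its residual can be computed as

    v = -ce->ptr.pp_double[i][nmain+nslack];
    for(j=0; j<=nmain+nslack-1; j++)
        v = v+ce->ptr.pp_double[i][j]*x->ptr.p_double[j];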
-- ALGLIB -- Copyright 20.01.2012 by Bochkanov Sergey *************************************************************************/ ae_bool findfeasiblepoint(/* Real */ ae_vector* x, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* havebndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* havebndu, ae_int_t nmain, ae_int_t nslack, /* Real */ ae_matrix* ce, ae_int_t k, double epsi, ae_int_t* qpits, ae_int_t* gpaits, ae_state *_state) { ae_frame _frame_block; ae_matrix _ce; ae_int_t i; ae_int_t j; ae_int_t idx0; ae_int_t idx1; ae_vector permx; ae_vector xn; ae_vector xa; ae_vector newtonstep; ae_vector g; ae_vector pg; ae_vector tau; ae_vector s; double armijostep; double armijobeststep; double armijobestfeas; double v; double vv; double mx; double feaserr; double feaserr0; double feaserr1; double feasold; double feasnew; double pgnorm; double vn; double vd; double stp; ae_int_t vartofreeze; double valtofreeze; double maxsteplen; ae_bool werechangesinconstraints; ae_bool stage1isover; ae_bool converged; ae_vector activeconstraints; ae_vector tmpk; ae_vector colnorms; ae_int_t nactive; ae_int_t nfree; ae_vector p1; ae_vector p2; apbuffers buf; ae_int_t itscount; ae_int_t itswithintolerance; ae_int_t maxitswithintolerance; ae_int_t badits; ae_int_t maxbadits; ae_int_t gparuns; ae_int_t maxarmijoruns; ae_matrix permce; ae_matrix q; ae_bool result; ae_frame_make(_state, &_frame_block); memset(&_ce, 0, sizeof(_ce)); memset(&permx, 0, sizeof(permx)); memset(&xn, 0, sizeof(xn)); memset(&xa, 0, sizeof(xa)); memset(&newtonstep, 0, sizeof(newtonstep)); memset(&g, 0, sizeof(g)); memset(&pg, 0, sizeof(pg)); memset(&tau, 0, sizeof(tau)); memset(&s, 0, sizeof(s)); memset(&activeconstraints, 0, sizeof(activeconstraints)); memset(&tmpk, 0, sizeof(tmpk)); memset(&colnorms, 0, sizeof(colnorms)); memset(&p1, 0, sizeof(p1)); memset(&p2, 0, sizeof(p2)); memset(&buf, 0, sizeof(buf)); memset(&permce, 0, sizeof(permce)); memset(&q, 0, sizeof(q)); ae_matrix_init_copy(&_ce, ce, _state, ae_true); ce = &_ce; *qpits = 0; *gpaits = 0; ae_vector_init(&permx, 0, DT_REAL, _state, ae_true); ae_vector_init(&xn, 0, DT_REAL, _state, ae_true); ae_vector_init(&xa, 0, DT_REAL, _state, ae_true); ae_vector_init(&newtonstep, 0, DT_REAL, _state, ae_true); ae_vector_init(&g, 0, DT_REAL, _state, ae_true); ae_vector_init(&pg, 0, DT_REAL, _state, ae_true); ae_vector_init(&tau, 0, DT_REAL, _state, ae_true); ae_vector_init(&s, 0, DT_REAL, _state, ae_true); ae_vector_init(&activeconstraints, 0, DT_REAL, _state, ae_true); ae_vector_init(&tmpk, 0, DT_REAL, _state, ae_true); ae_vector_init(&colnorms, 0, DT_REAL, _state, ae_true); ae_vector_init(&p1, 0, DT_INT, _state, ae_true); ae_vector_init(&p2, 0, DT_INT, _state, ae_true); _apbuffers_init(&buf, _state, ae_true); ae_matrix_init(&permce, 0, 0, DT_REAL, _state, ae_true); ae_matrix_init(&q, 0, 0, DT_REAL, _state, ae_true); maxitswithintolerance = 3; maxbadits = 3; maxarmijoruns = 5; *qpits = 0; *gpaits = 0; /* * Initial enforcement of the feasibility with respect to boundary constraints * NOTE: after this block we assume that boundary constraints are consistent. 
*/ if( !enforceboundaryconstraints(x, bndl, havebndl, bndu, havebndu, nmain, nslack, _state) ) { result = ae_false; ae_frame_leave(_state); return result; } if( k==0 ) { /* * No linear constraints, we can exit right now */ result = ae_true; ae_frame_leave(_state); return result; } /* * Scale rows of CE in such way that max(CE[i,0..nmain+nslack-1])=1 for any i=0..k-1 */ for(i=0; i<=k-1; i++) { v = 0.0; for(j=0; j<=nmain+nslack-1; j++) { v = ae_maxreal(v, ae_fabs(ce->ptr.pp_double[i][j], _state), _state); } if( ae_fp_neq(v,(double)(0)) ) { v = 1/v; ae_v_muld(&ce->ptr.pp_double[i][0], 1, ae_v_len(0,nmain+nslack), v); } } /* * Allocate temporaries */ ae_vector_set_length(&xn, nmain+nslack, _state); ae_vector_set_length(&xa, nmain+nslack, _state); ae_vector_set_length(&permx, nmain+nslack, _state); ae_vector_set_length(&g, nmain+nslack, _state); ae_vector_set_length(&pg, nmain+nslack, _state); ae_vector_set_length(&tmpk, k, _state); ae_matrix_set_length(&permce, k, nmain+nslack, _state); ae_vector_set_length(&activeconstraints, nmain+nslack, _state); ae_vector_set_length(&newtonstep, nmain+nslack, _state); ae_vector_set_length(&s, nmain+nslack, _state); ae_vector_set_length(&colnorms, nmain+nslack, _state); for(i=0; i<=nmain+nslack-1; i++) { s.ptr.p_double[i] = 1.0; colnorms.ptr.p_double[i] = 0.0; for(j=0; j<=k-1; j++) { colnorms.ptr.p_double[i] = colnorms.ptr.p_double[i]+ae_sqr(ce->ptr.pp_double[j][i], _state); } } /* * K>0, we have linear equality constraints combined with bound constraints. * * Try to find feasible point as minimizer of the quadratic function * F(x) = 0.5*||CE*x-b||^2 = 0.5*x'*(CE'*CE)*x - (b'*CE)*x + 0.5*b'*b * subject to boundary constraints given by BL, BU and non-negativity of * the slack variables. BTW, we drop constant term because it does not * actually influences on the solution. * * Below we will assume that K>0. */ itswithintolerance = 0; badits = 0; itscount = 0; for(;;) { /* * Stage 0: check for exact convergence */ converged = ae_true; feaserr = optserv_feasibilityerror(ce, x, nmain, nslack, k, &tmpk, _state); for(i=0; i<=k-1; i++) { /* * Calculate MX - maximum term in the left part * * Terminate if error in the right part is not greater than 100*Eps*MX. * * IMPORTANT: we must perform check for non-strict inequality, i.e. to use <= instead of <. * it will allow us to easily handle situations with zero rows of CE. * * NOTE: it is important to calculate feasibility error with dedicated * function. Once we had a situation when usage of "inline" code * resulted in different numerical values calculated at different * parts of program for exactly same X. However, this value is * essential for algorithm's ability to terminate before entering * infinite loop, so reproducibility of numerical results is very * important. 
*/ mx = (double)(0); v = -ce->ptr.pp_double[i][nmain+nslack]; for(j=0; j<=nmain+nslack-1; j++) { mx = ae_maxreal(mx, ae_fabs(ce->ptr.pp_double[i][j]*x->ptr.p_double[j], _state), _state); v = v+ce->ptr.pp_double[i][j]*x->ptr.p_double[j]; } converged = converged&&ae_fp_less_eq(ae_fabs(v, _state),100*ae_machineepsilon*mx); } feaserr0 = feaserr; if( converged ) { result = ae_fp_less_eq(feaserr,epsi); ae_frame_leave(_state); return result; } /* * Stage 1: equality constrained quadratic programming * * * treat active bound constraints as equality ones (constraint is considered * active when we are at the boundary, independently of the antigradient direction) * * calculate unrestricted Newton step to point XM (which may be infeasible) * calculate MaxStepLen = largest step in direction of XM which retains feasibility. * * perform bounded step from X to XN: * a) XN=XM (if XM is feasible) * b) XN=X-MaxStepLen*(XM-X) (otherwise) * * X := XN * * if XM (Newton step subject to currently active constraints) was feasible, goto Stage 2 * * repeat Stage 1 * * NOTE 1: in order to solve constrained qudratic subproblem we will have to reorder * variables in such way that ones corresponding to inactive constraints will * be first, and active ones will be last in the list. CE and X are now * [ xi ] * separated into two parts: CE = [CEi CEa], x = [ ], where CEi/Xi correspond * [ xa ] * to INACTIVE constraints, and CEa/Xa correspond to the ACTIVE ones. * * Now, instead of F=0.5*x'*(CE'*CE)*x - (b'*CE)*x + 0.5*b'*b, we have * F(xi) = 0.5*(CEi*xi,CEi*xi) + (CEa*xa-b,CEi*xi) + (0.5*CEa*xa-b,CEa*xa). * Here xa is considered constant, i.e. we optimize with respect to xi, leaving xa fixed. * * We can solve it by performing SVD of CEi and calculating pseudoinverse of the * Hessian matrix. Of course, we do NOT calculate pseudoinverse explicitly - we * just use singular vectors to perform implicit multiplication by it. * */ for(;;) { /* * Calculate G - gradient subject to equality constraints, * multiply it by inverse of the Hessian diagonal to obtain initial * step vector. * * Bound step subject to constraints which can be activated, * run Armijo search with increasing step size. * Search is terminated when feasibility error stops to decrease. * * NOTE: it is important to test for "stops to decrease" instead * of "starts to increase" in order to correctly handle cases with * zero CE. 
*/ armijobeststep = 0.0; optserv_feasibilityerrorgrad(ce, x, nmain, nslack, k, &armijobestfeas, &g, &tmpk, _state); for(i=0; i<=nmain-1; i++) { if( havebndl->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]) ) { g.ptr.p_double[i] = 0.0; } if( havebndu->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]) ) { g.ptr.p_double[i] = 0.0; } } for(i=0; i<=nslack-1; i++) { if( ae_fp_eq(x->ptr.p_double[nmain+i],0.0) ) { g.ptr.p_double[nmain+i] = 0.0; } } v = 0.0; for(i=0; i<=nmain+nslack-1; i++) { if( ae_fp_neq(ae_sqr(colnorms.ptr.p_double[i], _state),(double)(0)) ) { newtonstep.ptr.p_double[i] = -g.ptr.p_double[i]/ae_sqr(colnorms.ptr.p_double[i], _state); } else { newtonstep.ptr.p_double[i] = 0.0; } v = v+ae_sqr(newtonstep.ptr.p_double[i], _state); } if( ae_fp_eq(v,(double)(0)) ) { /* * Constrained gradient is zero, QP iterations are over */ break; } calculatestepbound(x, &newtonstep, 1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state); if( vartofreeze>=0&&ae_fp_eq(maxsteplen,(double)(0)) ) { /* * Can not perform step, QP iterations are over */ break; } if( vartofreeze>=0 ) { armijostep = ae_minreal(1.0, maxsteplen, _state); } else { armijostep = (double)(1); } for(;;) { ae_v_move(&xa.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); ae_v_addd(&xa.ptr.p_double[0], 1, &newtonstep.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), armijostep); enforceboundaryconstraints(&xa, bndl, havebndl, bndu, havebndu, nmain, nslack, _state); feaserr = optserv_feasibilityerror(ce, &xa, nmain, nslack, k, &tmpk, _state); if( ae_fp_greater_eq(feaserr,armijobestfeas) ) { break; } armijobestfeas = feaserr; armijobeststep = armijostep; armijostep = 2.0*armijostep; } ae_v_addd(&x->ptr.p_double[0], 1, &newtonstep.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), armijobeststep); enforceboundaryconstraints(x, bndl, havebndl, bndu, havebndu, nmain, nslack, _state); /* * Determine number of active and free constraints */ nactive = 0; for(i=0; i<=nmain-1; i++) { activeconstraints.ptr.p_double[i] = (double)(0); if( havebndl->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndl->ptr.p_double[i]) ) { activeconstraints.ptr.p_double[i] = (double)(1); } if( havebndu->ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]) ) { activeconstraints.ptr.p_double[i] = (double)(1); } if( ae_fp_greater(activeconstraints.ptr.p_double[i],(double)(0)) ) { nactive = nactive+1; } } for(i=0; i<=nslack-1; i++) { activeconstraints.ptr.p_double[nmain+i] = (double)(0); if( ae_fp_eq(x->ptr.p_double[nmain+i],0.0) ) { activeconstraints.ptr.p_double[nmain+i] = (double)(1); } if( ae_fp_greater(activeconstraints.ptr.p_double[nmain+i],(double)(0)) ) { nactive = nactive+1; } } nfree = nmain+nslack-nactive; if( nfree==0 ) { break; } *qpits = *qpits+1; /* * Reorder variables: CE is reordered to PermCE, X is reordered to PermX */ tagsortbuf(&activeconstraints, nmain+nslack, &p1, &p2, &buf, _state); for(i=0; i<=k-1; i++) { for(j=0; j<=nmain+nslack-1; j++) { permce.ptr.pp_double[i][j] = ce->ptr.pp_double[i][j]; } } for(j=0; j<=nmain+nslack-1; j++) { permx.ptr.p_double[j] = x->ptr.p_double[j]; } for(j=0; j<=nmain+nslack-1; j++) { if( p2.ptr.p_int[j]!=j ) { idx0 = p2.ptr.p_int[j]; idx1 = j; for(i=0; i<=k-1; i++) { v = permce.ptr.pp_double[i][idx0]; permce.ptr.pp_double[i][idx0] = permce.ptr.pp_double[i][idx1]; permce.ptr.pp_double[i][idx1] = v; } v = permx.ptr.p_double[idx0]; permx.ptr.p_double[idx0] = permx.ptr.p_double[idx1]; permx.ptr.p_double[idx1] = v; } } /* 
* Calculate (unprojected) gradient: * G(xi) = CEi'*(CEi*xi + CEa*xa - b) */ for(i=0; i<=nfree-1; i++) { g.ptr.p_double[i] = (double)(0); } for(i=0; i<=k-1; i++) { v = ae_v_dotproduct(&permce.ptr.pp_double[i][0], 1, &permx.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); tmpk.ptr.p_double[i] = v-ce->ptr.pp_double[i][nmain+nslack]; } for(i=0; i<=k-1; i++) { v = tmpk.ptr.p_double[i]; ae_v_addd(&g.ptr.p_double[0], 1, &permce.ptr.pp_double[i][0], 1, ae_v_len(0,nfree-1), v); } /* * Calculate Newton step using pseudoinverse PermCE: * F(xi) = 0.5*xi'*H*xi + g'*xi (Taylor decomposition) * XN = -H^(-1)*g (new point, solution of the QP subproblem) * H = CEi'*CEi * H^(-1) can be calculated via QR or LQ decomposition (see below) * step = -H^(-1)*g * * NOTE: PermCE is destroyed after this block */ for(i=0; i<=nmain+nslack-1; i++) { newtonstep.ptr.p_double[i] = (double)(0); } if( k<=nfree ) { /* * CEi = L*Q * H = Q'*L'*L*Q * inv(H) = Q'*inv(L)*inv(L')*Q * * NOTE: we apply minor regularizing perturbation to diagonal of L, * which is equal to 10*K*Eps */ rmatrixlq(&permce, k, nfree, &tau, _state); rmatrixlqunpackq(&permce, k, nfree, &tau, k, &q, _state); v = (double)(0); for(i=0; i<=k-1; i++) { v = ae_maxreal(v, ae_fabs(permce.ptr.pp_double[i][i], _state), _state); } v = coalesce(v, (double)(1), _state); for(i=0; i<=k-1; i++) { permce.ptr.pp_double[i][i] = permce.ptr.pp_double[i][i]+10*k*ae_machineepsilon*v; } rmatrixgemv(k, nfree, 1.0, &q, 0, 0, 0, &g, 0, 0.0, &tmpk, 0, _state); rmatrixtrsv(k, &permce, 0, 0, ae_false, ae_false, 1, &tmpk, 0, _state); rmatrixtrsv(k, &permce, 0, 0, ae_false, ae_false, 0, &tmpk, 0, _state); rmatrixgemv(nfree, k, -1.0, &q, 0, 0, 1, &tmpk, 0, 0.0, &newtonstep, 0, _state); } else { /* * CEi = Q*R * H = R'*R * inv(H) = inv(R)*inv(R') * * NOTE: we apply minor regularizing perturbation to diagonal of R, * which is equal to 10*K*Eps */ rmatrixqr(&permce, k, nfree, &tau, _state); v = (double)(0); for(i=0; i<=nfree-1; i++) { v = ae_maxreal(v, ae_fabs(permce.ptr.pp_double[i][i], _state), _state); } v = coalesce(v, (double)(1), _state); for(i=0; i<=nfree-1; i++) { vv = 10*nfree*ae_machineepsilon*v; if( ae_fp_less(permce.ptr.pp_double[i][i],(double)(0)) ) { vv = -vv; } permce.ptr.pp_double[i][i] = permce.ptr.pp_double[i][i]+vv; } ae_v_moveneg(&newtonstep.ptr.p_double[0], 1, &g.ptr.p_double[0], 1, ae_v_len(0,nfree-1)); rmatrixtrsv(nfree, &permce, 0, 0, ae_true, ae_false, 1, &newtonstep, 0, _state); rmatrixtrsv(nfree, &permce, 0, 0, ae_true, ae_false, 0, &newtonstep, 0, _state); } /* * Post-reordering of Newton step */ for(j=nmain+nslack-1; j>=0; j--) { if( p2.ptr.p_int[j]!=j ) { idx0 = p2.ptr.p_int[j]; idx1 = j; v = newtonstep.ptr.p_double[idx0]; newtonstep.ptr.p_double[idx0] = newtonstep.ptr.p_double[idx1]; newtonstep.ptr.p_double[idx1] = v; } } /* * NewtonStep contains Newton step subject to active bound constraints. * * Such step leads us to the minimizer of the equality constrained F, * but such minimizer may be infeasible because some constraints which * are inactive at the initial point can be violated at the solution. * * Thus, we perform optimization in two stages: * a) perform bounded Newton step, i.e. step in the Newton direction * until activation of the first constraint * b) in case (MaxStepLen>0)and(MaxStepLen<1), perform additional iteration * of the Armijo line search in the rest of the Newton direction. 
*/ calculatestepbound(x, &newtonstep, 1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state); if( vartofreeze>=0&&ae_fp_eq(maxsteplen,(double)(0)) ) { /* * Activation of the constraints prevent us from performing step, * QP iterations are over */ break; } if( vartofreeze>=0 ) { v = ae_minreal(1.0, maxsteplen, _state); } else { v = 1.0; } ae_v_moved(&xn.ptr.p_double[0], 1, &newtonstep.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), v); ae_v_add(&xn.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); postprocessboundedstep(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, vartofreeze, valtofreeze, v, maxsteplen, _state); if( ae_fp_greater(maxsteplen,(double)(0))&&ae_fp_less(maxsteplen,(double)(1)) ) { /* * Newton step was restricted by activation of the constraints, * perform Armijo iteration. * * Initial estimate for best step is zero step. We try different * step sizes, from the 1-MaxStepLen (residual of the full Newton * step) to progressively smaller and smaller steps. */ armijobeststep = 0.0; armijobestfeas = optserv_feasibilityerror(ce, &xn, nmain, nslack, k, &tmpk, _state); armijostep = 1-maxsteplen; for(j=0; j<=maxarmijoruns-1; j++) { ae_v_move(&xa.ptr.p_double[0], 1, &xn.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); ae_v_addd(&xa.ptr.p_double[0], 1, &newtonstep.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), armijostep); enforceboundaryconstraints(&xa, bndl, havebndl, bndu, havebndu, nmain, nslack, _state); feaserr = optserv_feasibilityerror(ce, &xa, nmain, nslack, k, &tmpk, _state); if( ae_fp_less(feaserr,armijobestfeas) ) { armijobestfeas = feaserr; armijobeststep = armijostep; } armijostep = 0.5*armijostep; } ae_v_move(&xa.ptr.p_double[0], 1, &xn.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); ae_v_addd(&xa.ptr.p_double[0], 1, &newtonstep.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), armijobeststep); enforceboundaryconstraints(&xa, bndl, havebndl, bndu, havebndu, nmain, nslack, _state); } else { /* * Armijo iteration is not performed */ ae_v_move(&xa.ptr.p_double[0], 1, &xn.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); } stage1isover = ae_fp_greater_eq(maxsteplen,(double)(1))||ae_fp_eq(maxsteplen,(double)(0)); /* * Calculate feasibility errors for old and new X. * These quantinies are used for debugging purposes only. * However, we can leave them in release code because performance impact is insignificant. * * Update X. Exit if needed. */ feasold = optserv_feasibilityerror(ce, x, nmain, nslack, k, &tmpk, _state); feasnew = optserv_feasibilityerror(ce, &xa, nmain, nslack, k, &tmpk, _state); if( ae_fp_greater_eq(feasnew,feasold) ) { break; } ae_v_move(&x->ptr.p_double[0], 1, &xa.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); if( stage1isover ) { break; } } /* * Stage 2: gradient projection algorithm (GPA) * * * calculate feasibility error (with respect to linear equality constraints) * * calculate gradient G of F, project it into feasible area (G => PG) * * exit if norm(PG) is exactly zero or feasibility error is smaller than EpsC * * let XM be exact minimum of F along -PG (XM may be infeasible). * calculate MaxStepLen = largest step in direction of -PG which retains feasibility. 
* * perform bounded step from X to XN: * a) XN=XM (if XM is feasible) * b) XN=X-MaxStepLen*PG (otherwise) * * X := XN * * stop after specified number of iterations or when no new constraints was activated * * NOTES: * * grad(F) = (CE'*CE)*x - (b'*CE)^T * * CE[i] denotes I-th row of CE * * XM = X+stp*(-PG) where stp=(grad(F(X)),PG)/(CE*PG,CE*PG). * Here PG is a projected gradient, but in fact it can be arbitrary non-zero * direction vector - formula for minimum of F along PG still will be correct. */ werechangesinconstraints = ae_false; for(gparuns=1; gparuns<=k; gparuns++) { /* * calculate feasibility error and G */ optserv_feasibilityerrorgrad(ce, x, nmain, nslack, k, &feaserr, &g, &tmpk, _state); /* * project G, filter it (strip numerical noise) */ ae_v_move(&pg.ptr.p_double[0], 1, &g.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); projectgradientintobc(x, &pg, bndl, havebndl, bndu, havebndu, nmain, nslack, _state); filterdirection(&pg, x, bndl, havebndl, bndu, havebndu, &s, nmain, nslack, 1.0E-9, _state); for(i=0; i<=nmain+nslack-1; i++) { if( ae_fp_neq(ae_sqr(colnorms.ptr.p_double[i], _state),(double)(0)) ) { pg.ptr.p_double[i] = pg.ptr.p_double[i]/ae_sqr(colnorms.ptr.p_double[i], _state); } else { pg.ptr.p_double[i] = 0.0; } } /* * Check GNorm and feasibility. * Exit when GNorm is exactly zero. */ pgnorm = ae_v_dotproduct(&pg.ptr.p_double[0], 1, &pg.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); pgnorm = ae_sqrt(pgnorm, _state); if( ae_fp_eq(pgnorm,(double)(0)) ) { result = ae_fp_less_eq(feaserr,epsi); ae_frame_leave(_state); return result; } /* * calculate planned step length */ vn = ae_v_dotproduct(&g.ptr.p_double[0], 1, &pg.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); vd = (double)(0); rmatrixgemv(k, nmain+nslack, 1.0, ce, 0, 0, 0, &pg, 0, 0.0, &tmpk, 0, _state); for(i=0; i<=k-1; i++) { vd = vd+ae_sqr(tmpk.ptr.p_double[i], _state); } stp = vn/vd; /* * Calculate step bound. * Perform bounded step and post-process it */ calculatestepbound(x, &pg, -1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state); if( vartofreeze>=0&&ae_fp_eq(maxsteplen,(double)(0)) ) { result = ae_false; ae_frame_leave(_state); return result; } if( vartofreeze>=0 ) { v = ae_minreal(stp, maxsteplen, _state); } else { v = stp; } ae_v_move(&xn.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); ae_v_subd(&xn.ptr.p_double[0], 1, &pg.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1), v); postprocessboundedstep(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, vartofreeze, valtofreeze, v, maxsteplen, _state); /* * update X * check stopping criteria */ werechangesinconstraints = werechangesinconstraints||numberofchangedconstraints(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, _state)>0; ae_v_move(&x->ptr.p_double[0], 1, &xn.ptr.p_double[0], 1, ae_v_len(0,nmain+nslack-1)); *gpaits = *gpaits+1; if( !werechangesinconstraints ) { break; } } /* * Stage 3: decide to stop algorithm or not to stop * * 1. we can stop when last GPA run did NOT changed constraints status. * It means that we've found final set of the active constraints even * before GPA made its run. And it means that Newton step moved us to * the minimum subject to the present constraints. * Depending on feasibility error, True or False is returned. 
         */
        feaserr = optserv_feasibilityerror(ce, x, nmain, nslack, k, &tmpk, _state);
        feaserr1 = feaserr;
        if( ae_fp_greater_eq(feaserr1,feaserr0*(1-1000*ae_machineepsilon)) )
        {
            inc(&badits, _state);
        }
        else
        {
            badits = 0;
        }
        if( ae_fp_less_eq(feaserr,epsi) )
        {
            inc(&itswithintolerance, _state);
        }
        else
        {
            itswithintolerance = 0;
        }
        if( (!werechangesinconstraints||itswithintolerance>=maxitswithintolerance)||badits>=maxbadits )
        {
            result = ae_fp_less_eq(feaserr,epsi);
            ae_frame_leave(_state);
            return result;
        }
        itscount = itscount+1;

        /*
         * Block below is never executed; it is necessary just to avoid
         * "unreachable code" warning about automatically generated code.
         *
         * We just need a way to transfer control to the end of the function,
         * even a fake way which is never actually traversed.
         */
        if( alwaysfalse(_state) )
        {
            result = ae_false;
            ae_assert(ae_false, "Assertion failed", _state);
            break;
        }
    }
    ae_frame_leave(_state);
    return result;
}


/*************************************************************************
This function checks that the input derivatives are correct.

First it scales the parameters DF0 and DF1 from the segment [A;B] to
[0;1]. Then it builds a Hermite spline and evaluates the spline and its
derivative at the midpoint 0.5. The error scale S is computed as
Max(|DF0|,|DF1|,|F0-F1|) (plus small machine-epsilon-based terms).
Correct derivatives have to satisfy the conditions |H-F|/S<=0.001 and
|H'-F'|/S<=0.001.

INPUT PARAMETERS:
    F0   -  function value at the X-TestStep point;
    DF0  -  derivative value at the X-TestStep point;
    F1   -  function value at the X+TestStep point;
    DF1  -  derivative value at the X+TestStep point;
    F    -  function value being tested;
    DF   -  derivative value being tested;
    Width-  width of the verification segment.

RESULT:
    True if the input derivatives pass the check, False otherwise.

  -- ALGLIB --
     Copyright 29.05.2012 by Bochkanov Sergey
*************************************************************************/
ae_bool derivativecheck(double f0,
     double df0,
     double f1,
     double df1,
     double f,
     double df,
     double width,
     ae_state *_state)
{
    double s;
    double h;
    double dh;
    ae_bool result;

    /*
     * Rescale input data to [0,1]
     */
    df = width*df;
    df0 = width*df0;
    df1 = width*df1;

    /*
     * Compute error scale, two sources are used:
     * * magnitudes of derivatives and secants
     * * magnitudes of input data times sqrt(machine_epsilon)
     */
    s = 0.0;
    s = ae_maxreal(s, ae_fabs(df0, _state), _state);
    s = ae_maxreal(s, ae_fabs(df1, _state), _state);
    s = ae_maxreal(s, ae_fabs(f1-f0, _state), _state);
    s = ae_maxreal(s, ae_sqrt(ae_machineepsilon, _state)*ae_fabs(f0, _state), _state);
    s = ae_maxreal(s, ae_sqrt(ae_machineepsilon, _state)*ae_fabs(f1, _state), _state);

    /*
     * Compute H and dH/dX at the middle of interval
     */
    h = 0.5*(f0+f1)+0.125*(df0-df1);
    dh = 1.5*(f1-f0)-0.250*(df0+df1);

    /*
     * Check
     */
    if( ae_fp_neq(s,(double)(0)) )
    {
        if( ae_fp_greater(ae_fabs(h-f, _state)/s,0.001)||ae_fp_greater(ae_fabs(dh-df, _state)/s,0.001) )
        {
            result = ae_false;
            return result;
        }
    }
    else
    {
        if( ae_fp_neq(h-f,0.0)||ae_fp_neq(dh-df,0.0) )
        {
            result = ae_false;
            return result;
        }
    }
    result = ae_true;
    return result;
}


/*************************************************************************
Having a quadratic target function

    f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b)

and its parabolic model along direction D

    F(x0+alpha*D) = D2*alpha^2 + D1*alpha

this function estimates numerical errors in the coefficients of the model.

It is important that this function does NOT calculate D1/D2 - it only
estimates the numerical errors introduced while evaluating these
coefficients and compares the magnitudes of D1/D2 against the magnitudes
of those errors.
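As a quick reminder of where these coefficients come from (ignoring the
penalty term and the constant f(x0), and assuming symmetric A), expanding
the target along D gives

    f(x0+alpha*D) = f(x0) + alpha*D'*(A*x0+b) + 0.5*alpha^2*D'*A*D

so D1 corresponds to D'*(A*x0+b) and D2 to 0.5*D'*A*D - exactly the
quantities whose rounding errors are bounded below.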
As a result, one of three outcomes is returned for each coefficient:
* the "true" coefficient is almost surely positive
* the "true" coefficient is almost surely negative
* numerical errors in the coefficient are so large that it cannot be
  reliably distinguished from zero

INPUT PARAMETERS:
    AbsASum -   SUM(|A[i,j]|)
    AbsASum2-   SUM(A[i,j]^2)
    MB      -   max(|B|)
    MX      -   max(|X|)
    MD      -   max(|D|)
    D1      -   linear coefficient
    D2      -   quadratic coefficient

OUTPUT PARAMETERS:
    D1Est   -   estimate of D1 sign, accounting for possible numerical errors:
                * >0 means "almost surely positive" (D1>0 and large)
                * <0 means "almost surely negative" (D1<0 and large)
                * =0 means "pessimistic estimate of numerical errors in D1 is
                  larger than magnitude of D1 itself; it is impossible to
                  reliably distinguish D1 from zero".
    D2Est   -   estimate of D2 sign, accounting for possible numerical errors:
                * >0 means "almost surely positive" (D2>0 and large)
                * <0 means "almost surely negative" (D2<0 and large)
                * =0 means "pessimistic estimate of numerical errors in D2 is
                  larger than magnitude of D2 itself; it is impossible to
                  reliably distinguish D2 from zero".

  -- ALGLIB --
     Copyright 14.05.2014 by Bochkanov Sergey
*************************************************************************/
void estimateparabolicmodel(double absasum,
     double absasum2,
     double mx,
     double mb,
     double md,
     double d1,
     double d2,
     ae_int_t* d1est,
     ae_int_t* d2est,
     ae_state *_state)
{
    double d1esterror;
    double d2esterror;
    double eps;
    double e1;
    double e2;

    *d1est = 0;
    *d2est = 0;

    /*
     * Error estimates:
     *
     * * error in D1=d'*(A*x+b) is estimated as
     *   ED1 = eps*MAX_ABS(D)*(MAX_ABS(X)*ENORM(A)+MAX_ABS(B))
     * * error in D2=0.5*d'*A*d is estimated as
     *   ED2 = eps*MAX_ABS(D)^2*ENORM(A)
     *
     * Here ENORM(A) is some pseudo-norm which reflects the way numerical
     * error accumulates during addition. Two ways of accumulation are
     * possible - worst case (errors always increase) and mean-case (errors
     * may cancel each other). We calculate geometrical average of both:
     * * ENORM_WORST(A) = SUM(|A[i,j]|)        error in N-term sum grows as O(N)
     * * ENORM_MEAN(A)  = SQRT(SUM(A[i,j]^2))  error in N-term sum grows as O(sqrt(N))
     * * ENORM(A)       = SQRT(ENORM_WORST(A)*ENORM_MEAN(A))
     */
    eps = 4*ae_machineepsilon;
    e1 = eps*md*(mx*absasum+mb);
    e2 = eps*md*(mx*ae_sqrt(absasum2, _state)+mb);
    d1esterror = ae_sqrt(e1*e2, _state);
    if( ae_fp_less_eq(ae_fabs(d1, _state),d1esterror) )
    {
        *d1est = 0;
    }
    else
    {
        *d1est = ae_sign(d1, _state);
    }
    e1 = eps*md*md*absasum;
    e2 = eps*md*md*ae_sqrt(absasum2, _state);
    d2esterror = ae_sqrt(e1*e2, _state);
    if( ae_fp_less_eq(ae_fabs(d2, _state),d2esterror) )
    {
        *d2est = 0;
    }
    else
    {
        *d2est = ae_sign(d2, _state);
    }
}


/*************************************************************************
This function calculates an inexact rank-K preconditioner for the Hessian
matrix H=D+W'*C*W, where:
* H is a Hessian matrix, which is approximated by D/W/C
* D is a diagonal matrix with positive entries
* W is a rank-K correction
* C is a diagonal factor of the rank-K correction

This preconditioner is inexact but fast - it requires O(N*K) time to be
applied. Its main purpose is to be used in barrier/penalty/AUL methods,
where ill-conditioning is created by a combination of two factors:
* simple bounds on variables => ill-conditioned D
* general barrier/penalty => correction W with large coefficient C (makes
  the problem ill-conditioned) but W itself is well conditioned.
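For intuition (an informal sketch; W is stored row-wise as in this unit, with
row Wk being the k-th correction vector), the low-rank structure means that a
product with H costs only O(N*K) operations:

    H*v = D.*v + SUM(k=0..K-1) C[k]*(Wk,v)*Wk

and the preconditioner below approximates inv(H)*v at the same cost.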
Preconditioner P is calculated by artificially constructing a set of BFGS updates which tries to reproduce behavior of H: * Sk = Wk (k-th row of W) * Yk = (D+Wk'*Ck*Wk)*Sk * Yk/Sk are reordered by ascending of C[k]*norm(Wk)^2 Here we assume that rows of Wk are orthogonal or nearly orthogonal, which allows us to have O(N*K+K^2) update instead of O(N*K^2) one. Reordering of updates is essential for having good performance on non-orthogonal problems (updates which do not add much of curvature are added first, and updates which add very large eigenvalues are added last and override effect of the first updates). On input this function takes direction S and components of H. On output it returns inv(H)*S -- ALGLIB -- Copyright 30.06.2014 by Bochkanov Sergey *************************************************************************/ void inexactlbfgspreconditioner(/* Real */ ae_vector* s, ae_int_t n, /* Real */ ae_vector* d, /* Real */ ae_vector* c, /* Real */ ae_matrix* w, ae_int_t k, precbuflbfgs* buf, ae_state *_state) { ae_int_t idx; ae_int_t i; ae_int_t j; double v; double v0; double v1; double vx; double vy; rvectorsetlengthatleast(&buf->norms, k, _state); rvectorsetlengthatleast(&buf->alpha, k, _state); rvectorsetlengthatleast(&buf->rho, k, _state); rmatrixsetlengthatleast(&buf->yk, k, n, _state); ivectorsetlengthatleast(&buf->idx, k, _state); /* * Check inputs */ for(i=0; i<=n-1; i++) { ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "InexactLBFGSPreconditioner: D[]<=0", _state); } for(i=0; i<=k-1; i++) { ae_assert(ae_fp_greater_eq(c->ptr.p_double[i],(double)(0)), "InexactLBFGSPreconditioner: C[]<0", _state); } /* * Reorder linear terms according to increase of second derivative. * Fill Norms[] array. */ for(idx=0; idx<=k-1; idx++) { v = ae_v_dotproduct(&w->ptr.pp_double[idx][0], 1, &w->ptr.pp_double[idx][0], 1, ae_v_len(0,n-1)); buf->norms.ptr.p_double[idx] = v*c->ptr.p_double[idx]; buf->idx.ptr.p_int[idx] = idx; } tagsortfasti(&buf->norms, &buf->idx, &buf->bufa, &buf->bufb, k, _state); /* * Apply updates */ for(idx=0; idx<=k-1; idx++) { /* * Select update to perform (ordered by ascending of second derivative) */ i = buf->idx.ptr.p_int[idx]; /* * Calculate YK and Rho */ v = ae_v_dotproduct(&w->ptr.pp_double[i][0], 1, &w->ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); v = v*c->ptr.p_double[i]; for(j=0; j<=n-1; j++) { buf->yk.ptr.pp_double[i][j] = (d->ptr.p_double[j]+v)*w->ptr.pp_double[i][j]; } v = 0.0; v0 = 0.0; v1 = 0.0; for(j=0; j<=n-1; j++) { vx = w->ptr.pp_double[i][j]; vy = buf->yk.ptr.pp_double[i][j]; v = v+vx*vy; v0 = v0+vx*vx; v1 = v1+vy*vy; } if( (ae_fp_greater(v,(double)(0))&&ae_fp_greater(v0*v1,(double)(0)))&&ae_fp_greater(v/ae_sqrt(v0*v1, _state),n*10*ae_machineepsilon) ) { buf->rho.ptr.p_double[i] = 1/v; } else { buf->rho.ptr.p_double[i] = 0.0; } } for(idx=k-1; idx>=0; idx--) { /* * Select update to perform (ordered by ascending of second derivative) */ i = buf->idx.ptr.p_int[idx]; /* * Calculate Alpha[] according to L-BFGS algorithm * and update S[] */ v = ae_v_dotproduct(&w->ptr.pp_double[i][0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); v = buf->rho.ptr.p_double[i]*v; buf->alpha.ptr.p_double[i] = v; ae_v_subd(&s->ptr.p_double[0], 1, &buf->yk.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } for(j=0; j<=n-1; j++) { s->ptr.p_double[j] = s->ptr.p_double[j]/d->ptr.p_double[j]; } for(idx=0; idx<=k-1; idx++) { /* * Select update to perform (ordered by ascending of second derivative) */ i = buf->idx.ptr.p_int[idx]; /* * Calculate Beta according to L-BFGS algorithm * and update 
S[] */ v = ae_v_dotproduct(&buf->yk.ptr.pp_double[i][0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); v = buf->alpha.ptr.p_double[i]-buf->rho.ptr.p_double[i]*v; ae_v_addd(&s->ptr.p_double[0], 1, &w->ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } } /************************************************************************* This function prepares exact low-rank preconditioner for Hessian matrix H=D+W'*C*W, where: * H is a Hessian matrix, which is approximated by D/W/C * D is a diagonal matrix with positive entries * W is a rank-K correction * C is a diagonal factor of rank-K correction, positive semidefinite This preconditioner is exact but relatively slow - it requires O(N*K^2) time to be prepared and O(N*K) time to be applied. It is calculated with the help of Woodbury matrix identity. It should be used as follows: * PrepareLowRankPreconditioner() call PREPARES data structure * subsequent calls to ApplyLowRankPreconditioner() APPLY preconditioner to user-specified search direction. -- ALGLIB -- Copyright 30.06.2014 by Bochkanov Sergey *************************************************************************/ void preparelowrankpreconditioner(/* Real */ ae_vector* d, /* Real */ ae_vector* c, /* Real */ ae_matrix* w, ae_int_t n, ae_int_t k, precbuflowrank* buf, ae_state *_state) { ae_int_t i; ae_int_t j; double v; ae_bool b; /* * Check inputs */ ae_assert(n>0, "PrepareLowRankPreconditioner: N<=0", _state); ae_assert(k>=0, "PrepareLowRankPreconditioner: N<=0", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "PrepareLowRankPreconditioner: D[]<=0", _state); } for(i=0; i<=k-1; i++) { ae_assert(ae_fp_greater_eq(c->ptr.p_double[i],(double)(0)), "PrepareLowRankPreconditioner: C[]<0", _state); } /* * Prepare buffer structure; skip zero entries of update. 
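     *
     * The construction below essentially relies on the Woodbury identity
     * (valid because D>0 and the retained rows have C[i]>0):
     *
     *     inv(D + W'*C*W) = inv(D) - inv(D)*W' * inv( inv(C) + W*inv(D)*W' ) * W*inv(D)
     *
     * so after a Cholesky factorization of Z = inv(C) + W*inv(D)*W' the
     * preconditioner can be applied in O(N*K) operations per call.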
*/ rvectorsetlengthatleast(&buf->d, n, _state); rmatrixsetlengthatleast(&buf->v, k, n, _state); rvectorsetlengthatleast(&buf->bufc, k, _state); rmatrixsetlengthatleast(&buf->bufw, k+1, n, _state); buf->n = n; buf->k = 0; for(i=0; i<=k-1; i++) { /* * Estimate magnitude of update row; skip zero rows (either W or C are zero) */ v = 0.0; for(j=0; j<=n-1; j++) { v = v+w->ptr.pp_double[i][j]*w->ptr.pp_double[i][j]; } v = v*c->ptr.p_double[i]; if( ae_fp_eq(v,(double)(0)) ) { continue; } ae_assert(ae_fp_greater(v,(double)(0)), "PrepareLowRankPreconditioner: internal error", _state); /* * Copy non-zero update to buffer */ buf->bufc.ptr.p_double[buf->k] = c->ptr.p_double[i]; for(j=0; j<=n-1; j++) { buf->v.ptr.pp_double[buf->k][j] = w->ptr.pp_double[i][j]; buf->bufw.ptr.pp_double[buf->k][j] = w->ptr.pp_double[i][j]; } inc(&buf->k, _state); } /* * Reset K (for convenience) */ k = buf->k; /* * Prepare diagonal factor; quick exit for K=0 */ for(i=0; i<=n-1; i++) { buf->d.ptr.p_double[i] = 1/d->ptr.p_double[i]; } if( k==0 ) { return; } /* * Use Woodbury matrix identity */ rmatrixsetlengthatleast(&buf->bufz, k, k, _state); for(i=0; i<=k-1; i++) { for(j=0; j<=k-1; j++) { buf->bufz.ptr.pp_double[i][j] = 0.0; } } for(i=0; i<=k-1; i++) { buf->bufz.ptr.pp_double[i][i] = 1/buf->bufc.ptr.p_double[i]; } for(j=0; j<=n-1; j++) { buf->bufw.ptr.pp_double[k][j] = 1/ae_sqrt(d->ptr.p_double[j], _state); } for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { buf->bufw.ptr.pp_double[i][j] = buf->bufw.ptr.pp_double[i][j]*buf->bufw.ptr.pp_double[k][j]; } } rmatrixgemm(k, k, n, 1.0, &buf->bufw, 0, 0, 0, &buf->bufw, 0, 0, 1, 1.0, &buf->bufz, 0, 0, _state); b = spdmatrixcholeskyrec(&buf->bufz, 0, k, ae_true, &buf->tmp, _state); ae_assert(b, "PrepareLowRankPreconditioner: internal error (Cholesky failure)", _state); rmatrixlefttrsm(k, n, &buf->bufz, 0, 0, ae_true, ae_false, 1, &buf->v, 0, 0, _state); for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { buf->v.ptr.pp_double[i][j] = buf->v.ptr.pp_double[i][j]*buf->d.ptr.p_double[j]; } } } /************************************************************************* This function apply exact low-rank preconditioner prepared by PrepareLowRankPreconditioner function (see its comments for more information). -- ALGLIB -- Copyright 30.06.2014 by Bochkanov Sergey *************************************************************************/ void applylowrankpreconditioner(/* Real */ ae_vector* s, precbuflowrank* buf, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; ae_int_t j; double v; n = buf->n; k = buf->k; rvectorsetlengthatleast(&buf->tmp, n, _state); for(j=0; j<=n-1; j++) { buf->tmp.ptr.p_double[j] = buf->d.ptr.p_double[j]*s->ptr.p_double[j]; } for(i=0; i<=k-1; i++) { v = 0.0; for(j=0; j<=n-1; j++) { v = v+buf->v.ptr.pp_double[i][j]*s->ptr.p_double[j]; } for(j=0; j<=n-1; j++) { buf->tmp.ptr.p_double[j] = buf->tmp.ptr.p_double[j]-v*buf->v.ptr.pp_double[i][j]; } } for(i=0; i<=n-1; i++) { s->ptr.p_double[i] = buf->tmp.ptr.p_double[i]; } } /************************************************************************* This subroutine initializes smoothness monitor at the beginning of the optimization session. It is possible to perform "dummy" initialization with N=K=0. 
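A rough lifecycle of the monitor (an illustrative outline only; X/Fi/Jac, the
direction D and the step lengths are supplied by the calling optimizer) is:

    smoothnessmonitorinit(&monitor, n, k, ae_true, _state);
    smoothnessmonitorstartlinesearch(&monitor, &x0, &fi0, &jac0, _state);
    smoothnessmonitorenqueuepoint(&monitor, &d, stp, &x1, &fi1, &jac1, _state);   (once per trial point)
    smoothnessmonitorfinalizelinesearch(&monitor, _state);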
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorinit(smoothnessmonitor* monitor, ae_int_t n, ae_int_t k, ae_bool checksmoothness, ae_state *_state) { monitor->n = n; monitor->k = k; monitor->checksmoothness = checksmoothness; monitor->linesearchspoiled = ae_false; monitor->linesearchstarted = ae_false; monitor->enqueuedcnt = 0; monitor->sortedcnt = 0; monitor->nonc0currentrating = 0.0; monitor->nonc1currentrating = 0.0; optguardinitinternal(&monitor->rep, n, k, _state); monitor->nonc0strrating = 0.0; monitor->nonc0lngrating = -ae_maxrealnumber; monitor->nonc0strrep.positive = ae_false; monitor->nonc0lngrep.positive = ae_false; monitor->nonc1test0strrating = 0.0; monitor->nonc1test0lngrating = -ae_maxrealnumber; monitor->nonc1test0strrep.positive = ae_false; monitor->nonc1test0lngrep.positive = ae_false; monitor->nonc1test1strrating = 0.0; monitor->nonc1test1lngrating = -ae_maxrealnumber; monitor->nonc1test1strrep.positive = ae_false; monitor->nonc1test1lngrep.positive = ae_false; monitor->badgradhasxj = ae_false; ae_vector_set_length(&monitor->rstateg0.ia, 4+1, _state); ae_vector_set_length(&monitor->rstateg0.ra, 3+1, _state); monitor->rstateg0.stage = -1; } /************************************************************************* This subroutine starts line search -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorstartlinesearch(smoothnessmonitor* monitor, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; ae_int_t j; double v; n = monitor->n; k = monitor->k; /* * Skip if inactive or spoiled by NAN */ if( !monitor->checksmoothness ) { return; } v = (double)(0); for(i=0; i<=n-1; i++) { v = 0.5*v+x->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { v = 0.5*v+fi->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { v = 0.5*v+jac->ptr.pp_double[i][j]; } } if( !ae_isfinite(v, _state) ) { monitor->linesearchspoiled = ae_true; return; } /* * Finalize previous line search */ if( monitor->enqueuedcnt>0 ) { smoothnessmonitorfinalizelinesearch(monitor, _state); } /* * Store initial point */ monitor->linesearchstarted = ae_true; monitor->enqueuedcnt = 1; rvectorgrowto(&monitor->enqueuedstp, monitor->enqueuedcnt, _state); rvectorgrowto(&monitor->enqueuedx, monitor->enqueuedcnt*n, _state); rvectorgrowto(&monitor->enqueuedfunc, monitor->enqueuedcnt*k, _state); rmatrixgrowrowsto(&monitor->enqueuedjac, monitor->enqueuedcnt*k, n, _state); monitor->enqueuedstp.ptr.p_double[0] = 0.0; for(j=0; j<=n-1; j++) { monitor->enqueuedx.ptr.p_double[j] = x->ptr.p_double[j]; } for(i=0; i<=k-1; i++) { monitor->enqueuedfunc.ptr.p_double[i] = fi->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { monitor->enqueuedjac.ptr.pp_double[i][j] = jac->ptr.pp_double[i][j]; } } /* * Initialize sorted representation */ rvectorgrowto(&monitor->sortedstp, 1, _state); ivectorgrowto(&monitor->sortedidx, 1, _state); monitor->sortedstp.ptr.p_double[0] = 0.0; monitor->sortedidx.ptr.p_int[0] = 0; monitor->sortedcnt = 1; } /************************************************************************* This subroutine starts line search for a scalar function - convenience wrapper for ....StartLineSearch() with unscaled variables. 
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorstartlinesearch1u(smoothnessmonitor* monitor, /* Real */ ae_vector* s, /* Real */ ae_vector* invs, /* Real */ ae_vector* x, double f0, /* Real */ ae_vector* j0, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; n = monitor->n; k = monitor->k; if( !monitor->checksmoothness ) { return; } ae_assert(k==1, "SmoothnessMonitorStartLineSearch1: K<>1", _state); rvectorsetlengthatleast(&monitor->xu, n, _state); rvectorsetlengthatleast(&monitor->f0, 1, _state); rmatrixsetlengthatleast(&monitor->j0, 1, n, _state); monitor->f0.ptr.p_double[0] = f0; for(i=0; i<=n-1; i++) { monitor->xu.ptr.p_double[i] = x->ptr.p_double[i]*invs->ptr.p_double[i]; monitor->j0.ptr.pp_double[0][i] = j0->ptr.p_double[i]*s->ptr.p_double[i]; } smoothnessmonitorstartlinesearch(monitor, &monitor->xu, &monitor->f0, &monitor->j0, _state); } /************************************************************************* This subroutine enqueues one more trial point -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorenqueuepoint(smoothnessmonitor* monitor, /* Real */ ae_vector* d, double stp, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; ae_int_t j; double v; ae_int_t enqueuedcnt; ae_int_t sortedcnt; ae_bool hasduplicates; ae_int_t funcidx; ae_int_t stpidx; double f0; double f1; double f2; double f3; double f4; double noise0; double noise1; double noise2; double noise3; double rating; double lipschitz; double nrm; double lengthrating; n = monitor->n; k = monitor->k; /* * Skip if inactive or spoiled by NAN */ if( (!monitor->checksmoothness||monitor->linesearchspoiled)||!monitor->linesearchstarted ) { return; } v = stp; for(i=0; i<=n-1; i++) { v = 0.5*v+x->ptr.p_double[i]; } for(i=0; i<=n-1; i++) { v = 0.5*v+d->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { v = 0.5*v+fi->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { v = 0.5*v+jac->ptr.pp_double[i][j]; } } if( !ae_isfinite(v, _state) ) { monitor->linesearchspoiled = ae_true; return; } /* * Enqueue */ inc(&monitor->enqueuedcnt, _state); enqueuedcnt = monitor->enqueuedcnt; rvectorgrowto(&monitor->dcur, n, _state); rvectorgrowto(&monitor->enqueuedstp, enqueuedcnt, _state); rvectorgrowto(&monitor->enqueuedx, enqueuedcnt*n, _state); rvectorgrowto(&monitor->enqueuedfunc, enqueuedcnt*k, _state); rmatrixgrowrowsto(&monitor->enqueuedjac, enqueuedcnt*k, n, _state); monitor->enqueuedstp.ptr.p_double[enqueuedcnt-1] = stp; for(j=0; j<=n-1; j++) { monitor->dcur.ptr.p_double[j] = d->ptr.p_double[j]; } for(j=0; j<=n-1; j++) { monitor->enqueuedx.ptr.p_double[(enqueuedcnt-1)*n+j] = x->ptr.p_double[j]; } for(i=0; i<=k-1; i++) { monitor->enqueuedfunc.ptr.p_double[(enqueuedcnt-1)*k+i] = fi->ptr.p_double[i]; } for(i=0; i<=k-1; i++) { for(j=0; j<=n-1; j++) { monitor->enqueuedjac.ptr.pp_double[(enqueuedcnt-1)*k+i][j] = jac->ptr.pp_double[i][j]; } } /* * Update sorted representation: insert to the end, reorder */ sortedcnt = monitor->sortedcnt; hasduplicates = ae_false; for(i=0; i<=sortedcnt-1; i++) { hasduplicates = hasduplicates||monitor->sortedstp.ptr.p_double[i]==stp; } if( !hasduplicates ) { inc(&monitor->sortedcnt, _state); sortedcnt = monitor->sortedcnt; rvectorgrowto(&monitor->sortedstp, sortedcnt, _state); ivectorgrowto(&monitor->sortedidx, sortedcnt, 
_state); monitor->sortedstp.ptr.p_double[sortedcnt-1] = stp; monitor->sortedidx.ptr.p_int[sortedcnt-1] = enqueuedcnt-1; for(i=sortedcnt-2; i>=0; i--) { if( monitor->sortedstp.ptr.p_double[i]<=monitor->sortedstp.ptr.p_double[i+1] ) { break; } v = monitor->sortedstp.ptr.p_double[i]; monitor->sortedstp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i+1]; monitor->sortedstp.ptr.p_double[i+1] = v; j = monitor->sortedidx.ptr.p_int[i]; monitor->sortedidx.ptr.p_int[i] = monitor->sortedidx.ptr.p_int[i+1]; monitor->sortedidx.ptr.p_int[i+1] = j; } } /* * Scan sorted representation, check for C0 and C1 continuity * violations. */ rvectorsetlengthatleast(&monitor->f, sortedcnt, _state); rvectorsetlengthatleast(&monitor->g, sortedcnt*n, _state); for(funcidx=0; funcidx<=k-1; funcidx++) { /* * Fetch current function and its gradient to the contiguous storage */ for(i=0; i<=sortedcnt-1; i++) { monitor->f.ptr.p_double[i] = monitor->enqueuedfunc.ptr.p_double[monitor->sortedidx.ptr.p_int[i]*k+funcidx]; for(j=0; j<=n-1; j++) { monitor->g.ptr.p_double[i*n+j] = monitor->enqueuedjac.ptr.pp_double[monitor->sortedidx.ptr.p_int[i]*k+funcidx][j]; } } /* * Check C0 continuity. * * The basis approach is that we find appropriate candidate point * (either a local minimum along the line - for target; or an interval * where function sign is changed - for constraints), calculate left * and right estimates of the Lipschitz constant (slopes between points * #0 and #1, #2 and #3), and then calculate slope between points #1 and * #2 and compare it with left/right estimates. * * The actual approach is a bit more complex to account for different * sources of numerical noise and different false positive scenarios. */ if( funcidx==0 ) { for(stpidx=0; stpidx<=sortedcnt-4; stpidx++) { f0 = monitor->f.ptr.p_double[stpidx+0]; f1 = monitor->f.ptr.p_double[stpidx+1]; f2 = monitor->f.ptr.p_double[stpidx+2]; f3 = monitor->f.ptr.p_double[stpidx+3]; noise0 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f0, _state), 1.0, _state); noise1 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f1, _state), 1.0, _state); noise2 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f2, _state), 1.0, _state); noise3 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f3, _state), 1.0, _state); if( !(f1sortedstp.ptr.p_double[stpidx+1]-monitor->sortedstp.ptr.p_double[stpidx+0], monitor->sortedstp.ptr.p_double[stpidx+2]-monitor->sortedstp.ptr.p_double[stpidx+1], monitor->sortedstp.ptr.p_double[stpidx+3]-monitor->sortedstp.ptr.p_double[stpidx+2], ae_false, &rating, &lipschitz, _state); if( rating>optserv_ogminrating0 ) { /* * Store to total report */ monitor->rep.nonc0suspected = ae_true; monitor->rep.nonc0test0positive = ae_true; if( rating>monitor->nonc0currentrating ) { monitor->nonc0currentrating = rating; monitor->rep.nonc0lipschitzc = lipschitz; monitor->rep.nonc0fidx = funcidx; } /* * Store to "strongest" report */ if( rating>monitor->nonc0strrating ) { monitor->nonc0strrating = rating; monitor->nonc0strrep.positive = ae_true; monitor->nonc0strrep.fidx = funcidx; monitor->nonc0strrep.n = n; monitor->nonc0strrep.cnt = sortedcnt; monitor->nonc0strrep.stpidxa = stpidx+0; monitor->nonc0strrep.stpidxb = stpidx+3; rvectorsetlengthatleast(&monitor->nonc0strrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc0strrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc0strrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc0strrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } 
rvectorsetlengthatleast(&monitor->nonc0strrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc0strrep.f, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc0strrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc0strrep.f.ptr.p_double[i] = monitor->f.ptr.p_double[i]; } } /* * Store to "longest" report */ nrm = (double)(0); for(i=0; i<=n-1; i++) { nrm = nrm+ae_sqr(monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]-monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[sortedcnt-1]*n+i], _state); } nrm = ae_sqrt(nrm, _state); nrm = ae_minreal(nrm, 1.0, _state); nrm = coalesce(nrm, ae_machineepsilon, _state); lengthrating = sortedcnt+ae_log(nrm, _state)/ae_log((double)(100), _state); if( lengthrating>monitor->nonc0lngrating ) { monitor->nonc0lngrating = lengthrating; monitor->nonc0lngrep.positive = ae_true; monitor->nonc0lngrep.fidx = funcidx; monitor->nonc0lngrep.n = n; monitor->nonc0lngrep.cnt = sortedcnt; monitor->nonc0lngrep.stpidxa = stpidx+0; monitor->nonc0lngrep.stpidxb = stpidx+3; rvectorsetlengthatleast(&monitor->nonc0lngrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc0lngrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc0lngrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc0lngrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } rvectorsetlengthatleast(&monitor->nonc0lngrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc0lngrep.f, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc0lngrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc0lngrep.f.ptr.p_double[i] = monitor->f.ptr.p_double[i]; } } } } } /* * C1 continuity test #0 */ for(stpidx=0; stpidx<=sortedcnt-7; stpidx++) { /* * Fetch function values */ f2 = monitor->f.ptr.p_double[stpidx+2]; f3 = monitor->f.ptr.p_double[stpidx+3]; f4 = monitor->f.ptr.p_double[stpidx+4]; noise2 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f2, _state), 1.0, _state); noise3 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f3, _state), 1.0, _state); /* * Decide whether we want to test this interval or not; for target * function we test intervals around minimum, for constraints we * test intervals of sign change. */ if( funcidx==0 ) { /* * Skip if not minimum */ if( !(f30 ) { continue; } } optserv_c1continuitytest0(monitor, funcidx, stpidx+0, sortedcnt, _state); optserv_c1continuitytest0(monitor, funcidx, stpidx+1, sortedcnt, _state); } /* * C1 continuity test #1 */ for(stpidx=0; stpidx<=sortedcnt-4; stpidx++) { /* * Fetch function values from the interval being tested */ f0 = monitor->f.ptr.p_double[stpidx+0]; f1 = monitor->f.ptr.p_double[stpidx+1]; f2 = monitor->f.ptr.p_double[stpidx+2]; f3 = monitor->f.ptr.p_double[stpidx+3]; noise0 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f0, _state), 1.0, _state); noise1 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f1, _state), 1.0, _state); noise2 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f2, _state), 1.0, _state); noise3 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f3, _state), 1.0, _state); /* * Decide whether we want to test this interval or not; for target * function we test intervals around minimum, for constraints we * test intervals of sign change. 
*/ if( funcidx==0 ) { /* * Skip if not minimum */ if( !(f1<f0-noise0&&f2<f3-noise3) ) { continue; } } else { /* * Skip if sign does not change */ if( f0*f3>0 ) { continue; } } optserv_c1continuitytest1(monitor, funcidx, stpidx, sortedcnt, _state); } } } /************************************************************************* This subroutine enqueues one more trial point for a task with scalar function with unscaled variables - a convenience wrapper for more general EnqueuePoint() -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorenqueuepoint1u(smoothnessmonitor* monitor, /* Real */ ae_vector* s, /* Real */ ae_vector* invs, /* Real */ ae_vector* d, double stp, /* Real */ ae_vector* x, double f0, /* Real */ ae_vector* j0, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; n = monitor->n; k = monitor->k; if( !monitor->checksmoothness ) { return; } ae_assert(k==1, "SmoothnessMonitorEnqueuePoint1: K<>1", _state); rvectorsetlengthatleast(&monitor->xu, n, _state); rvectorsetlengthatleast(&monitor->du, n, _state); rvectorsetlengthatleast(&monitor->f0, 1, _state); rmatrixsetlengthatleast(&monitor->j0, 1, n, _state); monitor->f0.ptr.p_double[0] = f0; for(i=0; i<=n-1; i++) { monitor->xu.ptr.p_double[i] = x->ptr.p_double[i]*invs->ptr.p_double[i]; monitor->du.ptr.p_double[i] = d->ptr.p_double[i]*invs->ptr.p_double[i]; monitor->j0.ptr.pp_double[0][i] = j0->ptr.p_double[i]*s->ptr.p_double[i]; } smoothnessmonitorenqueuepoint(monitor, &monitor->du, stp, &monitor->xu, &monitor->f0, &monitor->j0, _state); } /************************************************************************* This subroutine finalizes line search -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorfinalizelinesearch(smoothnessmonitor* monitor, ae_state *_state) { /* * As for now - nothing to be done. */ } /************************************************************************* This function starts aggressive probing for a range of step lengths [0,StpMax]. This function stores NValues values per step, with the first one (index 0) value being "primary" one (target function / merit function) and the rest being supplementary ones. -- ALGLIB -- Copyright 10.10.2019 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorstartprobing(smoothnessmonitor* monitor, double stpmax, ae_int_t nvalues, double stepscale, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state)&&ae_fp_greater(stpmax,(double)(0)), "SmoothnessMonitorStartProbing: StpMax<=0", _state); ae_assert(nvalues>=1, "SmoothnessMonitorStartProbing: NValues<1", _state); ae_assert(ae_isfinite(stepscale, _state)&&ae_fp_greater_eq(stepscale,(double)(0)), "SmoothnessMonitorStartProbing: StepScale<0", _state); monitor->probingnvalues = nvalues; monitor->probingnstepsstored = 0; monitor->probingstepmax = stpmax; monitor->probingstepscale = stepscale; rvectorsetlengthatleast(&monitor->probingf, nvalues, _state); ae_vector_set_length(&monitor->probingrcomm.ia, 1+1, _state); monitor->probingrcomm.stage = -1; } /************************************************************************* This function performs aggressive probing. After each call it returns step to evaluate in Monitor.ProbingStp. Load values being probed into Monitor.ProbingF and continue iteration.
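A typical caller-side loop looks roughly as follows (illustrative sketch only;
the evaluation callback evaluate_merit() and its arguments are hypothetical,
they stand for whatever the calling optimizer already uses):

    smoothnessmonitorstartprobing(monitor, stpmax, 1, stepscale, _state);
    while( smoothnessmonitorprobe(monitor, _state) )
    {
        // evaluate the merit function at X0+Monitor.ProbingStp*D and store
        // it into the first element of Monitor.ProbingF
        monitor->probingf.ptr.p_double[0] = evaluate_merit(monitor->probingstp);
    }
    smoothnessmonitortraceprobingresults(monitor, _state);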
-- ALGLIB -- Copyright 10.10.2019 by Bochkanov Sergey *************************************************************************/ ae_bool smoothnessmonitorprobe(smoothnessmonitor* monitor, ae_state *_state) { ae_int_t i; ae_int_t j; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( monitor->probingrcomm.stage>=0 ) { i = monitor->probingrcomm.ia.ptr.p_int[0]; j = monitor->probingrcomm.ia.ptr.p_int[1]; } else { i = 359; j = -58; } if( monitor->probingrcomm.stage==0 ) { goto lbl_0; } /* * Routine body */ i = 0; lbl_1: if( i>40 ) { goto lbl_3; } rvectorgrowto(&monitor->probingsteps, monitor->probingnstepsstored+1, _state); rmatrixgrowrowsto(&monitor->probingvalues, monitor->probingnstepsstored+1, monitor->probingnvalues, _state); rmatrixgrowrowsto(&monitor->probingslopes, monitor->probingnstepsstored+1, monitor->probingnvalues, _state); if( i<=20 ) { monitor->probingstp = ae_pow(0.66, (double)(i), _state)*monitor->probingstepmax; } else { monitor->probingstp = ae_pow(0.95, (double)(i-20), _state)*monitor->probingstepmax; } monitor->probingsteps.ptr.p_double[monitor->probingnstepsstored] = monitor->probingstp; monitor->probingrcomm.stage = 0; goto lbl_rcomm; lbl_0: for(j=0; j<=monitor->probingnvalues-1; j++) { monitor->probingvalues.ptr.pp_double[monitor->probingnstepsstored][j] = monitor->probingf.ptr.p_double[j]; monitor->probingslopes.ptr.pp_double[monitor->probingnstepsstored][j] = (double)(0); } inc(&monitor->probingnstepsstored, _state); i = i+1; goto lbl_1; lbl_3: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; monitor->probingrcomm.ia.ptr.p_int[0] = i; monitor->probingrcomm.ia.ptr.p_int[1] = j; return result; } /************************************************************************* This function prints probing results to trace log. Tracing is performed using fixed width for all columns, so you may print a header before printing trace - and reasonably expect that its width will match that of the trace. This function promises that it wont change trace output format without introducing breaking changes into its signature. NOTE: this function ALWAYS tries to print results; it is caller's responsibility to decide whether he needs tracing or not. 
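NOTE: for each probed step the table prints the change of every value relative
      to the first (smallest) step, together with a finite-difference slope
      estimate. Informally, for rows I and I+1,

          Slope[I] ~ (Values[I+1]-Values[I]) / (StepScale*(Steps[I+1]-Steps[I]))

      with tiny regularizers added to both factors of the denominator in order
      to avoid division by zero (see the code below for the exact expression).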
-- ALGLIB -- Copyright 10.10.2019 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitortraceprobingresults(smoothnessmonitor* monitor, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; double steplen; /* * First, sort elements by their step */ for(i=0; i<=monitor->probingnstepsstored-1; i++) { k = i; for(j=i; j<=monitor->probingnstepsstored-1; j++) { if( ae_fp_less(monitor->probingsteps.ptr.p_double[j],monitor->probingsteps.ptr.p_double[k]) ) { k = j; } } swapelements(&monitor->probingsteps, i, k, _state); swaprows(&monitor->probingvalues, i, k, monitor->probingnvalues, _state); } for(i=0; i<=monitor->probingnstepsstored-2; i++) { for(j=0; j<=monitor->probingnvalues-1; j++) { steplen = (monitor->probingsteps.ptr.p_double[i+1]-monitor->probingsteps.ptr.p_double[i]+100*ae_machineepsilon)*(monitor->probingstepscale+ae_machineepsilon); monitor->probingslopes.ptr.pp_double[i][j] = (monitor->probingvalues.ptr.pp_double[i+1][j]-monitor->probingvalues.ptr.pp_double[i][j])/steplen; } } if( monitor->probingnstepsstored>=1 ) { for(j=0; j<=monitor->probingnvalues-1; j++) { monitor->probingslopes.ptr.pp_double[monitor->probingnstepsstored-1][j] = monitor->probingslopes.ptr.pp_double[ae_maxint(monitor->probingnstepsstored-2, 0, _state)][j]; } } /* * Print to trace log */ ae_trace("*** ----------"); for(j=0; j<=monitor->probingnvalues-1; j++) { ae_trace("-------------------------"); } ae_trace("\n"); for(i=0; i<=monitor->probingnstepsstored-1; i++) { ae_trace("*** | %0.4f |", (double)(monitor->probingsteps.ptr.p_double[i])); for(j=0; j<=monitor->probingnvalues-1; j++) { ae_trace(" %11.3e %10.2e |", (double)(monitor->probingvalues.ptr.pp_double[i][j]-monitor->probingvalues.ptr.pp_double[0][j]), (double)(monitor->probingslopes.ptr.pp_double[i][j])); } ae_trace("\n"); } ae_trace("*** ----------"); for(j=0; j<=monitor->probingnvalues-1; j++) { ae_trace("-------------------------"); } ae_trace("\n"); } /************************************************************************* This subroutine tells monitor to output trace info. If CallerSuggestsTrace=True, monitor ALWAYS prints trace, even if no suspicions were raised during optimization. If CallerSuggestsTrace=False, the monitor will print trace only if: * trace was requested by trace tag 'OPTGUARD' AND suspicious points were found during optimization * trace was requested by trace tag 'OPTGUARD.ALWAYS' - always -- ALGLIB -- Copyright 11.10.2019 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitortracestatus(smoothnessmonitor* monitor, ae_bool callersuggeststrace, ae_state *_state) { ae_bool needreport; ae_bool needxdreport; ae_bool suspicionsraised; ae_int_t i; double slope; /* * Do we need trace report? 
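 *
 * The report is emitted when the caller explicitly asks for it, when the
 * 'OPTGUARD.ALWAYS' trace tag is active, or when the 'OPTGUARD' tag is active
 * AND at least one suspicion flag was raised; the XK/D vectors are appended
 * only when 'OPTIMIZERS.X' is also active. In the C++ interface tracing is
 * usually activated with something like
 *
 *     alglib::trace_file("OPTGUARD,OPTIMIZERS.X", "trace.log");
 *
 * (call shown for illustration only - consult the Reference Manual for the
 * exact tracing API shipped with your ALGLIB version).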
*/ suspicionsraised = (monitor->rep.nonc0suspected||monitor->rep.nonc1suspected)||monitor->rep.badgradsuspected; needreport = ae_false; needreport = needreport||callersuggeststrace; needreport = needreport||ae_is_trace_enabled("OPTGUARD.ALWAYS"); needreport = needreport||(ae_is_trace_enabled("OPTGUARD")&&suspicionsraised); if( !needreport ) { return; } needxdreport = needreport&&ae_is_trace_enabled("OPTIMIZERS.X"); /* * */ ae_trace("\n"); ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); ae_trace("// OPTGUARD INTEGRITY CHECKER REPORT //\n"); ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); if( !suspicionsraised ) { ae_trace("> no discontinuity/nonsmoothness/bad-gradient suspicions were raised during optimization\n"); return; } if( monitor->rep.nonc0suspected ) { ae_trace("> [WARNING] suspected discontinuity (aka C0-discontinuity)\n"); } if( monitor->rep.nonc1suspected ) { ae_trace("> [WARNING] suspected nonsmoothness (aka C1-discontinuity)\n"); } ae_trace("> printing out test reports...\n"); if( monitor->rep.nonc0suspected&&monitor->rep.nonc0test0positive ) { ae_trace("> printing out discontinuity test #0 report:\n"); ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | Test #0 for discontinuity was triggered (this test |\n"); ae_trace("*** | analyzes changes in function values). See below for |\n"); ae_trace("*** | detailed info: |\n"); ae_trace("*** | * function index: %10d", (int)(monitor->nonc0lngrep.fidx)); if( monitor->nonc0lngrep.fidx==0 ) { ae_trace(" (target) |\n"); } else { ae_trace(" (constraint) |\n"); } ae_trace("*** | * F() Lipschitz const: %10.2e |\n", (double)(monitor->rep.nonc0lipschitzc)); ae_trace("*** | Printing out log of suspicious line search XK+Stp*D |\n"); ae_trace("*** | Look for abrupt changes in slope. |\n"); if( !needxdreport ) { ae_trace("*** | NOTE: XK and D are not printed by default. 
If you |\n"); ae_trace("*** | need them, add trace tag OPTIMIZERS.X |\n"); } ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | step along D | delta F | slope |\n"); ae_trace("*** ------------------------------------------------------|\n"); for(i=0; i<=monitor->nonc0lngrep.cnt-1; i++) { slope = monitor->nonc0lngrep.f.ptr.p_double[ae_minint(i+1, monitor->nonc0lngrep.cnt-1, _state)]-monitor->nonc0lngrep.f.ptr.p_double[i]; slope = slope/(1.0e-15+monitor->nonc0lngrep.stp.ptr.p_double[ae_minint(i+1, monitor->nonc0lngrep.cnt-1, _state)]-monitor->nonc0lngrep.stp.ptr.p_double[i]); ae_trace("*** | %13.5e | %13.5e | %11.3e |", (double)(monitor->nonc0lngrep.stp.ptr.p_double[i]), (double)(monitor->nonc0lngrep.f.ptr.p_double[i]-monitor->nonc0lngrep.f.ptr.p_double[0]), (double)(slope)); if( i>=monitor->nonc0lngrep.stpidxa&&i<=monitor->nonc0lngrep.stpidxb ) { ae_trace(" <---"); } ae_trace("\n"); } ae_trace("*** ------------------------------------------------------|\n"); if( needxdreport ) { ae_trace("*** XK = "); tracevectore6(&monitor->nonc0lngrep.x0, 0, monitor->n, _state); ae_trace("\n"); ae_trace("*** D = "); tracevectore6(&monitor->nonc0lngrep.d, 0, monitor->n, _state); ae_trace("\n"); } } if( monitor->rep.nonc1suspected&&monitor->rep.nonc1test0positive ) { ae_trace("> printing out nonsmoothness test #0 report:\n"); ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | Test #0 for nonsmoothness was triggered (this test |\n"); ae_trace("*** | analyzes changes in function values and ignores |\n"); ae_trace("*** | gradient info). See below for detailed info: |\n"); ae_trace("*** | * function index: %10d", (int)(monitor->nonc1test0lngrep.fidx)); if( monitor->nonc1test0lngrep.fidx==0 ) { ae_trace(" (target) |\n"); } else { ae_trace(" (constraint) |\n"); } ae_trace("*** | * dF/dX Lipschitz const: %10.2e |\n", (double)(monitor->rep.nonc1lipschitzc)); ae_trace("*** | Printing out log of suspicious line search XK+Stp*D |\n"); ae_trace("*** | Look for abrupt changes in slope. |\n"); if( !needxdreport ) { ae_trace("*** | NOTE: XK and D are not printed by default. 
If you |\n"); ae_trace("*** | need them, add trace tag OPTIMIZERS.X |\n"); } ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | step along D | delta F | slope |\n"); ae_trace("*** ------------------------------------------------------|\n"); for(i=0; i<=monitor->nonc1test0lngrep.cnt-1; i++) { slope = monitor->nonc1test0lngrep.f.ptr.p_double[ae_minint(i+1, monitor->nonc1test0lngrep.cnt-1, _state)]-monitor->nonc1test0lngrep.f.ptr.p_double[i]; slope = slope/(1.0e-15+monitor->nonc1test0lngrep.stp.ptr.p_double[ae_minint(i+1, monitor->nonc1test0lngrep.cnt-1, _state)]-monitor->nonc1test0lngrep.stp.ptr.p_double[i]); ae_trace("*** | %13.5e | %13.5e | %11.3e |", (double)(monitor->nonc1test0lngrep.stp.ptr.p_double[i]), (double)(monitor->nonc1test0lngrep.f.ptr.p_double[i]-monitor->nonc1test0lngrep.f.ptr.p_double[0]), (double)(slope)); if( i>=monitor->nonc1test0lngrep.stpidxa&&i<=monitor->nonc1test0lngrep.stpidxb ) { ae_trace(" <---"); } ae_trace("\n"); } ae_trace("*** ------------------------------------------------------|\n"); if( needxdreport ) { ae_trace("*** XK = "); tracevectore6(&monitor->nonc1test0lngrep.x0, 0, monitor->n, _state); ae_trace("\n"); ae_trace("*** D = "); tracevectore6(&monitor->nonc1test0lngrep.d, 0, monitor->n, _state); ae_trace("\n"); } } if( monitor->rep.nonc1suspected&&monitor->rep.nonc1test1positive ) { ae_trace("> printing out nonsmoothness test #1 report:\n"); ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | Test #1 for nonsmoothness was triggered (this test |\n"); ae_trace("*** | analyzes changes in gradient components). See below |\n"); ae_trace("*** | for detailed info: |\n"); ae_trace("*** | * function index: %10d", (int)(monitor->nonc1test1lngrep.fidx)); if( monitor->nonc1test1lngrep.fidx==0 ) { ae_trace(" (target) |\n"); } else { ae_trace(" (constraint) |\n"); } ae_trace("*** | * variable index I: %10d |\n", (int)(monitor->nonc1test1lngrep.vidx)); ae_trace("*** | * dF/dX Lipschitz const: %10.2e |\n", (double)(monitor->rep.nonc1lipschitzc)); ae_trace("*** | Printing out log of suspicious line search XK+Stp*D |\n"); ae_trace("*** | Look for abrupt changes in slope. |\n"); if( !needxdreport ) { ae_trace("*** | NOTE: XK and D are not printed by default. 
If you |\n"); ae_trace("*** | need them, add trace tag OPTIMIZERS.X |\n"); } ae_trace("*** -------------------------------------------------------\n"); ae_trace("*** | step along D | delta Gi | slope |\n"); ae_trace("*** ------------------------------------------------------|\n"); for(i=0; i<=monitor->nonc1test1lngrep.cnt-1; i++) { slope = monitor->nonc1test1lngrep.g.ptr.p_double[ae_minint(i+1, monitor->nonc1test1lngrep.cnt-1, _state)]-monitor->nonc1test1lngrep.g.ptr.p_double[i]; slope = slope/(1.0e-15+monitor->nonc1test1lngrep.stp.ptr.p_double[ae_minint(i+1, monitor->nonc1test1lngrep.cnt-1, _state)]-monitor->nonc1test1lngrep.stp.ptr.p_double[i]); ae_trace("*** | %13.5e | %13.5e | %11.3e |", (double)(monitor->nonc1test1lngrep.stp.ptr.p_double[i]), (double)(monitor->nonc1test1lngrep.g.ptr.p_double[i]-monitor->nonc1test1lngrep.g.ptr.p_double[0]), (double)(slope)); if( i>=monitor->nonc1test1lngrep.stpidxa&&i<=monitor->nonc1test1lngrep.stpidxb ) { ae_trace(" <---"); } ae_trace("\n"); } ae_trace("*** ------------------------------------------------------|\n"); if( needxdreport ) { ae_trace("*** XK = "); tracevectore6(&monitor->nonc1test1lngrep.x0, 0, monitor->n, _state); ae_trace("\n"); ae_trace("*** D = "); tracevectore6(&monitor->nonc1test1lngrep.d, 0, monitor->n, _state); ae_trace("\n"); } } } /************************************************************************* This subroutine exports report to user-readable representation (all arrays are forced to have exactly same size as needed; unused arrays are set to zero length). -- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ void smoothnessmonitorexportreport(smoothnessmonitor* monitor, optguardreport* rep, ae_state *_state) { /* * Finalize last line search, just to be sure */ if( monitor->enqueuedcnt>0 ) { smoothnessmonitorfinalizelinesearch(monitor, _state); } /* * Export report */ optguardexportreport(&monitor->rep, monitor->n, monitor->k, monitor->badgradhasxj, rep, _state); } /************************************************************************* Check numerical gradient at point X0 (unscaled variables!), with optional box constraints [BndL,BndU] (if HasBoxConstraints=True) and with scale vector S[]. Step S[i]*TestStep is performed along I-th variable. NeedFiJ rcomm protocol is used to request derivative information. Box constraints BndL/BndU are expected to be feasible. It is possible to have BndL=BndU. -- ALGLIB -- Copyright 06.12.2018 by Bochkanov Sergey *************************************************************************/ ae_bool smoothnessmonitorcheckgradientatx0(smoothnessmonitor* monitor, /* Real */ ae_vector* unscaledx0, /* Real */ ae_vector* s, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_bool hasboxconstraints, double teststep, ae_state *_state) { ae_int_t n; ae_int_t k; ae_int_t i; ae_int_t j; ae_int_t varidx; double v; double vp; double vm; double vc; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
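 *
 * From the caller's side the protocol is a simple loop (illustrative sketch;
 * user_jacobian() is a hypothetical stand-in for whatever Jacobian callback
 * the optimizer already has, it is not part of this unit):
 *
 *     while( smoothnessmonitorcheckgradientatx0(monitor, x0, s, bndl, bndu, hasbc, teststep, _state) )
 *     {
 *         // Monitor.NeedFiJ is set: evaluate the user functions at Monitor.X
 *         // and store values/Jacobian into Monitor.Fi and Monitor.J
 *         user_jacobian(&monitor->x, &monitor->fi, &monitor->j);
 *     }
 *     // results are now in Monitor.Rep (BadGradSuspected and related fields)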
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( monitor->rstateg0.stage>=0 ) { n = monitor->rstateg0.ia.ptr.p_int[0]; k = monitor->rstateg0.ia.ptr.p_int[1]; i = monitor->rstateg0.ia.ptr.p_int[2]; j = monitor->rstateg0.ia.ptr.p_int[3]; varidx = monitor->rstateg0.ia.ptr.p_int[4]; v = monitor->rstateg0.ra.ptr.p_double[0]; vp = monitor->rstateg0.ra.ptr.p_double[1]; vm = monitor->rstateg0.ra.ptr.p_double[2]; vc = monitor->rstateg0.ra.ptr.p_double[3]; } else { n = -919; k = -909; i = 81; j = 255; varidx = 74; v = -788; vp = 809; vm = 205; vc = -838; } if( monitor->rstateg0.stage==0 ) { goto lbl_0; } if( monitor->rstateg0.stage==1 ) { goto lbl_1; } if( monitor->rstateg0.stage==2 ) { goto lbl_2; } if( monitor->rstateg0.stage==3 ) { goto lbl_3; } /* * Routine body */ n = monitor->n; k = monitor->k; monitor->needfij = ae_false; /* * Quick exit */ if( ((n<=0||k<=0)||!ae_isfinite(teststep, _state))||ae_fp_eq(teststep,(double)(0)) ) { result = ae_false; return result; } teststep = ae_fabs(teststep, _state); /* * Allocate storage */ rvectorsetlengthatleast(&monitor->x, n, _state); rvectorsetlengthatleast(&monitor->fi, k, _state); rmatrixsetlengthatleast(&monitor->j, k, n, _state); rvectorsetlengthatleast(&monitor->xbase, n, _state); rvectorsetlengthatleast(&monitor->fbase, k, _state); rvectorsetlengthatleast(&monitor->fm, k, _state); rvectorsetlengthatleast(&monitor->fc, k, _state); rvectorsetlengthatleast(&monitor->fp, k, _state); rvectorsetlengthatleast(&monitor->jm, k, _state); rvectorsetlengthatleast(&monitor->jc, k, _state); rvectorsetlengthatleast(&monitor->jp, k, _state); rmatrixsetlengthatleast(&monitor->jbaseusr, k, n, _state); rmatrixsetlengthatleast(&monitor->jbasenum, k, n, _state); rvectorsetlengthatleast(&monitor->rep.badgradxbase, n, _state); rmatrixsetlengthatleast(&monitor->rep.badgraduser, k, n, _state); rmatrixsetlengthatleast(&monitor->rep.badgradnum, k, n, _state); /* * Set XBase/Jacobian presence flag */ monitor->badgradhasxj = ae_true; /* * Determine reference point, compute function vector and user-supplied Jacobian */ for(i=0; i<=n-1; i++) { v = unscaledx0->ptr.p_double[i]; if( (hasboxconstraints&&ae_isfinite(bndl->ptr.p_double[i], _state))&&ae_fp_less(v,bndl->ptr.p_double[i]) ) { v = bndl->ptr.p_double[i]; } if( (hasboxconstraints&&ae_isfinite(bndu->ptr.p_double[i], _state))&&ae_fp_greater(v,bndu->ptr.p_double[i]) ) { v = bndu->ptr.p_double[i]; } monitor->xbase.ptr.p_double[i] = v; monitor->rep.badgradxbase.ptr.p_double[i] = v; monitor->x.ptr.p_double[i] = v; } monitor->needfij = ae_true; monitor->rstateg0.stage = 0; goto lbl_rcomm; lbl_0: monitor->needfij = ae_false; for(i=0; i<=k-1; i++) { monitor->fbase.ptr.p_double[i] = monitor->fi.ptr.p_double[i]; for(j=0; j<=n-1; j++) { monitor->jbaseusr.ptr.pp_double[i][j] = monitor->j.ptr.pp_double[i][j]; monitor->rep.badgraduser.ptr.pp_double[i][j] = monitor->j.ptr.pp_double[i][j]; } } /* * Check Jacobian column by column */ varidx = 0; lbl_4: if( varidx>n-1 ) { goto lbl_6; } /* * Determine test location. 
*/ v = monitor->xbase.ptr.p_double[varidx]; vm = v-s->ptr.p_double[varidx]*teststep; vp = v+s->ptr.p_double[varidx]*teststep; if( (hasboxconstraints&&ae_isfinite(bndl->ptr.p_double[varidx], _state))&&ae_fp_less(vm,bndl->ptr.p_double[varidx]) ) { vm = bndl->ptr.p_double[varidx]; } if( (hasboxconstraints&&ae_isfinite(bndu->ptr.p_double[varidx], _state))&&ae_fp_greater(vp,bndu->ptr.p_double[varidx]) ) { vp = bndu->ptr.p_double[varidx]; } vc = vm+(vp-vm)/2; /* * Quickly skip fixed variables */ if( (ae_fp_eq(vm,vp)||ae_fp_eq(vc,vm))||ae_fp_eq(vc,vp) ) { for(i=0; i<=k-1; i++) { monitor->rep.badgradnum.ptr.pp_double[i][varidx] = (double)(0); } goto lbl_5; } /* * Compute F/J at three trial points */ for(i=0; i<=n-1; i++) { monitor->x.ptr.p_double[i] = monitor->xbase.ptr.p_double[i]; } monitor->x.ptr.p_double[varidx] = vm; monitor->needfij = ae_true; monitor->rstateg0.stage = 1; goto lbl_rcomm; lbl_1: monitor->needfij = ae_false; for(i=0; i<=k-1; i++) { monitor->fm.ptr.p_double[i] = monitor->fi.ptr.p_double[i]; monitor->jm.ptr.p_double[i] = monitor->j.ptr.pp_double[i][varidx]; } for(i=0; i<=n-1; i++) { monitor->x.ptr.p_double[i] = monitor->xbase.ptr.p_double[i]; } monitor->x.ptr.p_double[varidx] = vc; monitor->needfij = ae_true; monitor->rstateg0.stage = 2; goto lbl_rcomm; lbl_2: monitor->needfij = ae_false; for(i=0; i<=k-1; i++) { monitor->fc.ptr.p_double[i] = monitor->fi.ptr.p_double[i]; monitor->jc.ptr.p_double[i] = monitor->j.ptr.pp_double[i][varidx]; } for(i=0; i<=n-1; i++) { monitor->x.ptr.p_double[i] = monitor->xbase.ptr.p_double[i]; } monitor->x.ptr.p_double[varidx] = vp; monitor->needfij = ae_true; monitor->rstateg0.stage = 3; goto lbl_rcomm; lbl_3: monitor->needfij = ae_false; for(i=0; i<=k-1; i++) { monitor->fp.ptr.p_double[i] = monitor->fi.ptr.p_double[i]; monitor->jp.ptr.p_double[i] = monitor->j.ptr.pp_double[i][varidx]; } /* * Check derivative */ for(i=0; i<=k-1; i++) { monitor->rep.badgradnum.ptr.pp_double[i][varidx] = (monitor->fp.ptr.p_double[i]-monitor->fm.ptr.p_double[i])/(vp-vm); if( !derivativecheck(monitor->fm.ptr.p_double[i], monitor->jm.ptr.p_double[i]*s->ptr.p_double[varidx], monitor->fp.ptr.p_double[i], monitor->jp.ptr.p_double[i]*s->ptr.p_double[varidx], monitor->fc.ptr.p_double[i], monitor->jc.ptr.p_double[i]*s->ptr.p_double[varidx], (vp-vm)/s->ptr.p_double[varidx], _state) ) { monitor->rep.badgradsuspected = ae_true; monitor->rep.badgradfidx = i; monitor->rep.badgradvidx = varidx; } } lbl_5: varidx = varidx+1; goto lbl_4; lbl_6: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; monitor->rstateg0.ia.ptr.p_int[0] = n; monitor->rstateg0.ia.ptr.p_int[1] = k; monitor->rstateg0.ia.ptr.p_int[2] = i; monitor->rstateg0.ia.ptr.p_int[3] = j; monitor->rstateg0.ia.ptr.p_int[4] = varidx; monitor->rstateg0.ra.ptr.p_double[0] = v; monitor->rstateg0.ra.ptr.p_double[1] = vp; monitor->rstateg0.ra.ptr.p_double[2] = vm; monitor->rstateg0.ra.ptr.p_double[3] = vc; return result; } /************************************************************************* This function calculates feasibility error (square root of sum of squared errors) for a Kx(NMain+NSlack) system of linear equalities. 
INPUT PARAMETERS: CE - set of K equality constraints, array[K,NMain+NSlack+1] X - candidate point, array [NMain+NSlack] NMain - number of primary variables NSlack - number of slack variables K - number of constraints Tmp0 - possible preallocated buffer, automatically resized RESULT: Sqrt(SUM(Err^2)) -- ALGLIB -- Copyright 17.09.2015 by Bochkanov Sergey *************************************************************************/ static double optserv_feasibilityerror(/* Real */ ae_matrix* ce, /* Real */ ae_vector* x, ae_int_t nmain, ae_int_t nslack, ae_int_t k, /* Real */ ae_vector* tmp0, ae_state *_state) { ae_int_t i; double result; rvectorsetlengthatleast(tmp0, k, _state); for(i=0; i<=k-1; i++) { tmp0->ptr.p_double[i] = -ce->ptr.pp_double[i][nmain+nslack]; } rmatrixgemv(k, nmain+nslack, 1.0, ce, 0, 0, 0, x, 0, 1.0, tmp0, 0, _state); result = 0.0; for(i=0; i<=k-1; i++) { result = result+tmp0->ptr.p_double[i]*tmp0->ptr.p_double[i]; } result = ae_sqrt(result, _state); return result; } /************************************************************************* This function calculates feasibility error (square root of sum of squared errors) for a Kx(NMain+NSlack) system of linear equalities and error gradient (with respect to x) INPUT PARAMETERS: CE - set of K equality constraints, array[K,NMain+NSlack+1] X - candidate point, array [NMain+NSlack] NMain - number of primary variables NSlack - number of slack variables K - number of constraints Grad - preallocated array[NMain+NSlack] Tmp0 - possible preallocated buffer, automatically resized RESULT: Err - Sqrt(SUM(Err^2)) Grad - error gradient with respect to X, array[NMain+NSlack] -- ALGLIB -- Copyright 17.09.2015 by Bochkanov Sergey *************************************************************************/ static void optserv_feasibilityerrorgrad(/* Real */ ae_matrix* ce, /* Real */ ae_vector* x, ae_int_t nmain, ae_int_t nslack, ae_int_t k, double* err, /* Real */ ae_vector* grad, /* Real */ ae_vector* tmp0, ae_state *_state) { ae_int_t i; double v; *err = 0; ae_assert(grad->cnt>=nmain+nslack, "FeasibilityErrorGrad: integrity check failed", _state); rvectorsetlengthatleast(tmp0, k, _state); rmatrixgemv(k, nmain+nslack, 1.0, ce, 0, 0, 0, x, 0, 0.0, tmp0, 0, _state); *err = 0.0; for(i=0; i<=k-1; i++) { v = tmp0->ptr.p_double[i]-ce->ptr.pp_double[i][nmain+nslack]; tmp0->ptr.p_double[i] = v; *err = *err+v*v; } *err = ae_sqrt(*err, _state); rmatrixgemv(nmain+nslack, k, 1.0, ce, 0, 0, 1, tmp0, 0, 0.0, grad, 0, _state); } /************************************************************************* This subroutine checks C0 continuity and returns continuity rating (normalized value, with values above 50-500 being good indication of the discontinuity) and Lipschitz constant. An interval between F1 and F2 is tested for (dis)continuity. Per-point noise estimates are provided. Delta[i] is a step from F[i] to F[i+1]. ApplySpecialCorrection parameter should be set to True if you use this function to estimate continuity of the model around minimum; it adds special correction which helps to detect "max(0,1/x)"-like discontinuities. Without this correction algorithm will still work, but will be a bit less powerful. Do not use this correction for situations when you want to estimate continuity around some non-extremal point - it may result in spurious discontinuities being reported. 
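Informally, three noise-adjusted slope magnitudes are formed from the four
points: L01 over [F0,F1], L12 over [F1,F2] and L23 over [F2,F3], and the
returned quantities are

    Rating    = L12/max(L01,L23)
    Lipschitz = L12

so a large rating means that the middle interval changes much faster than both
of its neighbors. For example, with unit steps, negligible noise and
F = (0,0,1,1) the middle slope is about 1 while the outer ones are nearly zero,
which gives a huge rating (discontinuity suspected); for the smooth sequence
F = (0,1,2,3) all three slopes are about 1 and the rating stays near 1.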
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ static void optserv_testc0continuity(double f0, double f1, double f2, double f3, double noise0, double noise1, double noise2, double noise3, double delta0, double delta1, double delta2, ae_bool applyspecialcorrection, double* rating, double* lipschitz, ae_state *_state) { double lipschitz01; double lipschitz12; double lipschitz23; *rating = 0; *lipschitz = 0; /* * Compute Lipschitz constant for the interval [0,1], * add noise correction in order to get increased estimate (makes * comparison below more conservative). */ lipschitz01 = (ae_fabs(f1-f0, _state)+(noise0+noise1))/delta0; /* * Compute Lipschitz constant for the interval [StpIdx+1,StpIdx+2], * SUBTRACT noise correction in order to get decreased estimate (makes * comparison below more conservative). */ lipschitz12 = ae_maxreal(ae_fabs(f2-f1, _state)-(noise1+noise2), 0.0, _state)/delta1; /* * Compute Lipschitz constant for the interval [StpIdx+2,StpIdx+3] * using special algorithm: * a) if F3<F2-(Noise2+Noise3), i.e. the function keeps decreasing past * point #2, the Lipschitz constant for this interval is set to zero; * this special correction helps to detect "max(0,1/x)"-like * discontinuities near a minimum * b) otherwise, the Lipschitz constant is computed as for the [0,1] * interval, with noise correction ADDED in order to get an increased * (conservative) estimate */ if( applyspecialcorrection&&ae_fp_less(f3,f2-(noise2+noise3)) ) { lipschitz23 = (double)(0); } else { lipschitz23 = (ae_fabs(f3-f2, _state)+(noise2+noise3))/delta2; } /* * Compute rating (ratio of the central slope to the largest of its neighbors) */ ae_assert(ae_maxreal(lipschitz01, lipschitz23, _state)>0, "OptGuard: integrity check failed", _state); *rating = lipschitz12/ae_maxreal(lipschitz01, lipschitz23, _state); *lipschitz = lipschitz12; } /************************************************************************* This subroutine checks C1 continuity using test #0 (function values from the line search log are studied, gradient is not used). An interval between F[StpIdx+0] and F[StpIdx+5] is tested for continuity. A normalized error metric (Lipschitz constant growth for the derivative) for the interval in question is calculated. Values above 50 are a good indication of the discontinuity. A six-point algorithm is used for testing, so we expect that Monitor.F and Monitor.Stp have enough points for this test.
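The test works by numerically differentiating the logged function values:
finite-difference slopes D0..D3 are computed over the intervals [0,1], [1,2],
[3,4] and [4,5] (the central interval [2,3], where the kink is suspected, is
intentionally skipped), and the same C0 continuity tester is then applied to
the sequence D0..D3 - a jump in these slopes indicates a discontinuous first
derivative.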
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ static void optserv_c1continuitytest0(smoothnessmonitor* monitor, ae_int_t funcidx, ae_int_t stpidx, ae_int_t sortedcnt, ae_state *_state) { double f0; double f1; double f2; double f3; double f4; double f5; double noise0; double noise1; double noise2; double noise3; double noise4; double noise5; double delta0; double delta1; double delta2; double delta3; double delta4; double d0; double d1; double d2; double d3; double newnoise0; double newnoise1; double newnoise2; double newnoise3; double newdelta0; double newdelta1; double newdelta2; double rating; double lipschitz; double lengthrating; ae_int_t i; ae_int_t n; double nrm; n = monitor->n; ae_assert(stpidx+5<sortedcnt, "C1ContinuityTest0: integrity check failed", _state); ae_assert(ae_fp_eq(monitor->sortedstp.ptr.p_double[0],(double)(0)), "C1ContinuityTest0: integrity check failed", _state); ae_assert(ae_fp_greater(monitor->sortedstp.ptr.p_double[sortedcnt-1],(double)(0)), "C1ContinuityTest0: integrity check failed", _state); /* * Fetch F, noise, Delta's */ f0 = monitor->f.ptr.p_double[stpidx+0]; f1 = monitor->f.ptr.p_double[stpidx+1]; f2 = monitor->f.ptr.p_double[stpidx+2]; f3 = monitor->f.ptr.p_double[stpidx+3]; f4 = monitor->f.ptr.p_double[stpidx+4]; f5 = monitor->f.ptr.p_double[stpidx+5]; noise0 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f0, _state), 1.0, _state); noise1 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f1, _state), 1.0, _state); noise2 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f2, _state), 1.0, _state); noise3 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f3, _state), 1.0, _state); noise4 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f4, _state), 1.0, _state); noise5 = optserv_ognoiselevelf*ae_maxreal(ae_fabs(f5, _state), 1.0, _state); delta0 = monitor->sortedstp.ptr.p_double[stpidx+1]-monitor->sortedstp.ptr.p_double[stpidx+0]; delta1 = monitor->sortedstp.ptr.p_double[stpidx+2]-monitor->sortedstp.ptr.p_double[stpidx+1]; delta2 = monitor->sortedstp.ptr.p_double[stpidx+3]-monitor->sortedstp.ptr.p_double[stpidx+2]; delta3 = monitor->sortedstp.ptr.p_double[stpidx+4]-monitor->sortedstp.ptr.p_double[stpidx+3]; delta4 = monitor->sortedstp.ptr.p_double[stpidx+5]-monitor->sortedstp.ptr.p_double[stpidx+4]; /* * Differentiate functions, get derivative values and noise * estimates at points (0+1)/2, (1+2)/2, (3+4)/2, (4+5)/2. * Compute new step values NewDelta[i] and new noise estimates. */ d0 = (f1-f0)/delta0; d1 = (f2-f1)/delta1; d2 = (f4-f3)/delta3; d3 = (f5-f4)/delta4; newnoise0 = (noise0+noise1)/delta0; newnoise1 = (noise1+noise2)/delta1; newnoise2 = (noise3+noise4)/delta3; newnoise3 = (noise4+noise5)/delta4; newdelta0 = 0.5*(delta0+delta1); newdelta1 = 0.5*delta1+delta2+0.5*delta3; newdelta2 = 0.5*(delta3+delta4); /* * Test with C0 continuity tester. "Special correction" is * turned off for this test.
*/ optserv_testc0continuity(d0, d1, d2, d3, newnoise0, newnoise1, newnoise2, newnoise3, newdelta0, newdelta1, newdelta2, ae_false, &rating, &lipschitz, _state); /* * Store results */ if( rating>optserv_ogminrating1 ) { /* * Store to total report */ monitor->rep.nonc1test0positive = ae_true; if( rating>monitor->nonc1currentrating ) { monitor->nonc1currentrating = rating; monitor->rep.nonc1suspected = ae_true; monitor->rep.nonc1lipschitzc = lipschitz; monitor->rep.nonc1fidx = funcidx; } /* * Store to "strongest" report */ if( rating>monitor->nonc1test0strrating ) { monitor->nonc1test0strrating = rating; monitor->nonc1test0strrep.positive = ae_true; monitor->nonc1test0strrep.fidx = funcidx; monitor->nonc1test0strrep.n = n; monitor->nonc1test0strrep.cnt = sortedcnt; monitor->nonc1test0strrep.stpidxa = stpidx+1; monitor->nonc1test0strrep.stpidxb = stpidx+4; rvectorsetlengthatleast(&monitor->nonc1test0strrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc1test0strrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc1test0strrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc1test0strrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } rvectorsetlengthatleast(&monitor->nonc1test0strrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc1test0strrep.f, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc1test0strrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc1test0strrep.f.ptr.p_double[i] = monitor->f.ptr.p_double[i]; } } /* * Store to "longest" report */ nrm = (double)(0); for(i=0; i<=n-1; i++) { nrm = nrm+ae_sqr(monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]-monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[sortedcnt-1]*n+i], _state); } nrm = ae_sqrt(nrm, _state); nrm = ae_minreal(nrm, 1.0, _state); nrm = coalesce(nrm, ae_machineepsilon, _state); lengthrating = sortedcnt+ae_log(nrm, _state)/ae_log((double)(100), _state); if( lengthrating>monitor->nonc1test0lngrating ) { monitor->nonc1test0lngrating = lengthrating; monitor->nonc1test0lngrep.positive = ae_true; monitor->nonc1test0lngrep.fidx = funcidx; monitor->nonc1test0lngrep.n = n; monitor->nonc1test0lngrep.cnt = sortedcnt; monitor->nonc1test0lngrep.stpidxa = stpidx+1; monitor->nonc1test0lngrep.stpidxb = stpidx+4; rvectorsetlengthatleast(&monitor->nonc1test0lngrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc1test0lngrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc1test0lngrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc1test0lngrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } rvectorsetlengthatleast(&monitor->nonc1test0lngrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc1test0lngrep.f, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc1test0lngrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc1test0lngrep.f.ptr.p_double[i] = monitor->f.ptr.p_double[i]; } } } } /************************************************************************* This subroutine checks C1 continuity using test #1 (individual gradient components from the line search log are studied for continuity). An interval between F[StpIdx+0] and F[StpIdx+3]is tested for continuity. An normalized error metric (Lipschitz constant growth for the derivative) for the interval in question is calculated. Values above 50 are a good indication of the discontinuity. 
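Unlike test #0, this test works directly with the gradient log: each component
G[i] of the gradient is treated as a scalar function of the step length and is
passed to the same C0 continuity tester that is used for function values, so a
jump in any single partial derivative along the line search is enough to raise
the non-C1 suspicion for the corresponding variable index.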
-- ALGLIB -- Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ static void optserv_c1continuitytest1(smoothnessmonitor* monitor, ae_int_t funcidx, ae_int_t stpidx, ae_int_t sortedcnt, ae_state *_state) { ae_int_t i; ae_int_t varidx; ae_int_t n; double f0; double f1; double f2; double f3; double noise0; double noise1; double noise2; double noise3; double nrm; double rating; double lengthrating; double lipschitz; n = monitor->n; ae_assert(stpidx+3<sortedcnt, "C1ContinuityTest1: integrity check failed", _state); ae_assert(ae_fp_eq(monitor->sortedstp.ptr.p_double[0],(double)(0)), "C1ContinuityTest1: integrity check failed", _state); ae_assert(ae_fp_greater(monitor->sortedstp.ptr.p_double[sortedcnt-1],(double)(0)), "C1ContinuityTest1: integrity check failed", _state); /* * Study each component of the gradient in the interval in question */ for(varidx=0; varidx<=n-1; varidx++) { f0 = monitor->g.ptr.p_double[(stpidx+0)*n+varidx]; f1 = monitor->g.ptr.p_double[(stpidx+1)*n+varidx]; f2 = monitor->g.ptr.p_double[(stpidx+2)*n+varidx]; f3 = monitor->g.ptr.p_double[(stpidx+3)*n+varidx]; noise0 = optserv_ognoiselevelg*ae_maxreal(ae_fabs(f0, _state), 1.0, _state); noise1 = optserv_ognoiselevelg*ae_maxreal(ae_fabs(f1, _state), 1.0, _state); noise2 = optserv_ognoiselevelg*ae_maxreal(ae_fabs(f2, _state), 1.0, _state); noise3 = optserv_ognoiselevelg*ae_maxreal(ae_fabs(f3, _state), 1.0, _state); optserv_testc0continuity(f0, f1, f2, f3, noise0, noise1, noise2, noise3, monitor->sortedstp.ptr.p_double[stpidx+1]-monitor->sortedstp.ptr.p_double[stpidx+0], monitor->sortedstp.ptr.p_double[stpidx+2]-monitor->sortedstp.ptr.p_double[stpidx+1], monitor->sortedstp.ptr.p_double[stpidx+3]-monitor->sortedstp.ptr.p_double[stpidx+2], ae_false, &rating, &lipschitz, _state); /* * Store results */ if( rating>optserv_ogminrating1 ) { /* * Store to total report */ monitor->rep.nonc1test1positive = ae_true; if( rating>monitor->nonc1currentrating ) { monitor->nonc1currentrating = rating; monitor->rep.nonc1suspected = ae_true; monitor->rep.nonc1lipschitzc = lipschitz; monitor->rep.nonc1fidx = funcidx; } /* * Store to "strongest" report */ if( rating>monitor->nonc1test1strrating ) { monitor->nonc1test1strrating = rating; monitor->nonc1test1strrep.positive = ae_true; monitor->nonc1test1strrep.fidx = funcidx; monitor->nonc1test1strrep.vidx = varidx; monitor->nonc1test1strrep.n = n; monitor->nonc1test1strrep.cnt = sortedcnt; monitor->nonc1test1strrep.stpidxa = stpidx+0; monitor->nonc1test1strrep.stpidxb = stpidx+3; rvectorsetlengthatleast(&monitor->nonc1test1strrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc1test1strrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc1test1strrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc1test1strrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } rvectorsetlengthatleast(&monitor->nonc1test1strrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc1test1strrep.g, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc1test1strrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc1test1strrep.g.ptr.p_double[i] = monitor->g.ptr.p_double[i*n+varidx]; } } /* * Store to "longest" report */ nrm = (double)(0); for(i=0; i<=n-1; i++) { nrm = nrm+ae_sqr(monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]-monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[sortedcnt-1]*n+i], _state); } nrm = ae_sqrt(nrm, _state); nrm = ae_minreal(nrm, 1.0, _state); nrm = coalesce(nrm, ae_machineepsilon, _state);
lengthrating = sortedcnt+ae_log(nrm, _state)/ae_log((double)(100), _state); if( lengthrating>monitor->nonc1test1lngrating ) { monitor->nonc1test1lngrating = lengthrating; monitor->nonc1test1lngrep.positive = ae_true; monitor->nonc1test1lngrep.fidx = funcidx; monitor->nonc1test1lngrep.vidx = varidx; monitor->nonc1test1lngrep.n = n; monitor->nonc1test1lngrep.cnt = sortedcnt; monitor->nonc1test1lngrep.stpidxa = stpidx+0; monitor->nonc1test1lngrep.stpidxb = stpidx+3; rvectorsetlengthatleast(&monitor->nonc1test1lngrep.x0, n, _state); rvectorsetlengthatleast(&monitor->nonc1test1lngrep.d, n, _state); for(i=0; i<=n-1; i++) { monitor->nonc1test1lngrep.x0.ptr.p_double[i] = monitor->enqueuedx.ptr.p_double[monitor->sortedidx.ptr.p_int[0]*n+i]; monitor->nonc1test1lngrep.d.ptr.p_double[i] = monitor->dcur.ptr.p_double[i]; } rvectorsetlengthatleast(&monitor->nonc1test1lngrep.stp, sortedcnt, _state); rvectorsetlengthatleast(&monitor->nonc1test1lngrep.g, sortedcnt, _state); for(i=0; i<=sortedcnt-1; i++) { monitor->nonc1test1lngrep.stp.ptr.p_double[i] = monitor->sortedstp.ptr.p_double[i]; monitor->nonc1test1lngrep.g.ptr.p_double[i] = monitor->g.ptr.p_double[i*n+varidx]; } } } } } void _precbuflbfgs_init(void* _p, ae_state *_state, ae_bool make_automatic) { precbuflbfgs *p = (precbuflbfgs*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->norms, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->alpha, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rho, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->yk, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->idx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->bufa, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufb, 0, DT_INT, _state, make_automatic); } void _precbuflbfgs_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { precbuflbfgs *dst = (precbuflbfgs*)_dst; precbuflbfgs *src = (precbuflbfgs*)_src; ae_vector_init_copy(&dst->norms, &src->norms, _state, make_automatic); ae_vector_init_copy(&dst->alpha, &src->alpha, _state, make_automatic); ae_vector_init_copy(&dst->rho, &src->rho, _state, make_automatic); ae_matrix_init_copy(&dst->yk, &src->yk, _state, make_automatic); ae_vector_init_copy(&dst->idx, &src->idx, _state, make_automatic); ae_vector_init_copy(&dst->bufa, &src->bufa, _state, make_automatic); ae_vector_init_copy(&dst->bufb, &src->bufb, _state, make_automatic); } void _precbuflbfgs_clear(void* _p) { precbuflbfgs *p = (precbuflbfgs*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->norms); ae_vector_clear(&p->alpha); ae_vector_clear(&p->rho); ae_matrix_clear(&p->yk); ae_vector_clear(&p->idx); ae_vector_clear(&p->bufa); ae_vector_clear(&p->bufb); } void _precbuflbfgs_destroy(void* _p) { precbuflbfgs *p = (precbuflbfgs*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->norms); ae_vector_destroy(&p->alpha); ae_vector_destroy(&p->rho); ae_matrix_destroy(&p->yk); ae_vector_destroy(&p->idx); ae_vector_destroy(&p->bufa); ae_vector_destroy(&p->bufb); } void _precbuflowrank_init(void* _p, ae_state *_state, ae_bool make_automatic) { precbuflowrank *p = (precbuflowrank*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->v, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufc, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufz, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufw, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp, 0, DT_REAL, _state, make_automatic); } void 
_precbuflowrank_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { precbuflowrank *dst = (precbuflowrank*)_dst; precbuflowrank *src = (precbuflowrank*)_src; dst->n = src->n; dst->k = src->k; ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_matrix_init_copy(&dst->v, &src->v, _state, make_automatic); ae_vector_init_copy(&dst->bufc, &src->bufc, _state, make_automatic); ae_matrix_init_copy(&dst->bufz, &src->bufz, _state, make_automatic); ae_matrix_init_copy(&dst->bufw, &src->bufw, _state, make_automatic); ae_vector_init_copy(&dst->tmp, &src->tmp, _state, make_automatic); } void _precbuflowrank_clear(void* _p) { precbuflowrank *p = (precbuflowrank*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->d); ae_matrix_clear(&p->v); ae_vector_clear(&p->bufc); ae_matrix_clear(&p->bufz); ae_matrix_clear(&p->bufw); ae_vector_clear(&p->tmp); } void _precbuflowrank_destroy(void* _p) { precbuflowrank *p = (precbuflowrank*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->d); ae_matrix_destroy(&p->v); ae_vector_destroy(&p->bufc); ae_matrix_destroy(&p->bufz); ae_matrix_destroy(&p->bufw); ae_vector_destroy(&p->tmp); } void _smoothnessmonitor_init(void* _p, ae_state *_state, ae_bool make_automatic) { smoothnessmonitor *p = (smoothnessmonitor*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->dcur, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->enqueuedstp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->enqueuedx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->enqueuedfunc, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->enqueuedjac, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sortedstp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sortedidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->probingf, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->probingsteps, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->probingvalues, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->probingslopes, 0, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->probingrcomm, _state, make_automatic); _optguardreport_init(&p->rep, _state, make_automatic); _optguardnonc0report_init(&p->nonc0strrep, _state, make_automatic); _optguardnonc0report_init(&p->nonc0lngrep, _state, make_automatic); _optguardnonc1test0report_init(&p->nonc1test0strrep, _state, make_automatic); _optguardnonc1test0report_init(&p->nonc1test0lngrep, _state, make_automatic); _optguardnonc1test1report_init(&p->nonc1test1strrep, _state, make_automatic); _optguardnonc1test1report_init(&p->nonc1test1lngrep, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstateg0, _state, make_automatic); ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fm, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->jm, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->jc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->jp, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->jbaseusr, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->jbasenum, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stp, 0, DT_REAL, _state, make_automatic); 
ae_vector_init(&p->bufr, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->f, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->deltax, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->bufi, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->xu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->du, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->f0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j0, 0, 0, DT_REAL, _state, make_automatic); } void _smoothnessmonitor_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { smoothnessmonitor *dst = (smoothnessmonitor*)_dst; smoothnessmonitor *src = (smoothnessmonitor*)_src; dst->n = src->n; dst->k = src->k; dst->checksmoothness = src->checksmoothness; ae_vector_init_copy(&dst->dcur, &src->dcur, _state, make_automatic); dst->enqueuedcnt = src->enqueuedcnt; ae_vector_init_copy(&dst->enqueuedstp, &src->enqueuedstp, _state, make_automatic); ae_vector_init_copy(&dst->enqueuedx, &src->enqueuedx, _state, make_automatic); ae_vector_init_copy(&dst->enqueuedfunc, &src->enqueuedfunc, _state, make_automatic); ae_matrix_init_copy(&dst->enqueuedjac, &src->enqueuedjac, _state, make_automatic); ae_vector_init_copy(&dst->sortedstp, &src->sortedstp, _state, make_automatic); ae_vector_init_copy(&dst->sortedidx, &src->sortedidx, _state, make_automatic); dst->sortedcnt = src->sortedcnt; dst->probingstp = src->probingstp; ae_vector_init_copy(&dst->probingf, &src->probingf, _state, make_automatic); dst->probingnvalues = src->probingnvalues; dst->probingstepmax = src->probingstepmax; dst->probingstepscale = src->probingstepscale; dst->probingnstepsstored = src->probingnstepsstored; ae_vector_init_copy(&dst->probingsteps, &src->probingsteps, _state, make_automatic); ae_matrix_init_copy(&dst->probingvalues, &src->probingvalues, _state, make_automatic); ae_matrix_init_copy(&dst->probingslopes, &src->probingslopes, _state, make_automatic); _rcommstate_init_copy(&dst->probingrcomm, &src->probingrcomm, _state, make_automatic); dst->linesearchspoiled = src->linesearchspoiled; dst->linesearchstarted = src->linesearchstarted; dst->nonc0currentrating = src->nonc0currentrating; dst->nonc1currentrating = src->nonc1currentrating; dst->badgradhasxj = src->badgradhasxj; _optguardreport_init_copy(&dst->rep, &src->rep, _state, make_automatic); dst->nonc0strrating = src->nonc0strrating; dst->nonc0lngrating = src->nonc0lngrating; _optguardnonc0report_init_copy(&dst->nonc0strrep, &src->nonc0strrep, _state, make_automatic); _optguardnonc0report_init_copy(&dst->nonc0lngrep, &src->nonc0lngrep, _state, make_automatic); dst->nonc1test0strrating = src->nonc1test0strrating; dst->nonc1test0lngrating = src->nonc1test0lngrating; _optguardnonc1test0report_init_copy(&dst->nonc1test0strrep, &src->nonc1test0strrep, _state, make_automatic); _optguardnonc1test0report_init_copy(&dst->nonc1test0lngrep, &src->nonc1test0lngrep, _state, make_automatic); dst->nonc1test1strrating = src->nonc1test1strrating; dst->nonc1test1lngrating = src->nonc1test1lngrating; _optguardnonc1test1report_init_copy(&dst->nonc1test1strrep, &src->nonc1test1strrep, _state, make_automatic); _optguardnonc1test1report_init_copy(&dst->nonc1test1lngrep, &src->nonc1test1lngrep, _state, make_automatic); dst->needfij = src->needfij; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); ae_vector_init_copy(&dst->fi, &src->fi, _state, 
make_automatic); ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic); _rcommstate_init_copy(&dst->rstateg0, &src->rstateg0, _state, make_automatic); ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); ae_vector_init_copy(&dst->fbase, &src->fbase, _state, make_automatic); ae_vector_init_copy(&dst->fm, &src->fm, _state, make_automatic); ae_vector_init_copy(&dst->fc, &src->fc, _state, make_automatic); ae_vector_init_copy(&dst->fp, &src->fp, _state, make_automatic); ae_vector_init_copy(&dst->jm, &src->jm, _state, make_automatic); ae_vector_init_copy(&dst->jc, &src->jc, _state, make_automatic); ae_vector_init_copy(&dst->jp, &src->jp, _state, make_automatic); ae_matrix_init_copy(&dst->jbaseusr, &src->jbaseusr, _state, make_automatic); ae_matrix_init_copy(&dst->jbasenum, &src->jbasenum, _state, make_automatic); ae_vector_init_copy(&dst->stp, &src->stp, _state, make_automatic); ae_vector_init_copy(&dst->bufr, &src->bufr, _state, make_automatic); ae_vector_init_copy(&dst->f, &src->f, _state, make_automatic); ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); ae_vector_init_copy(&dst->deltax, &src->deltax, _state, make_automatic); ae_vector_init_copy(&dst->tmpidx, &src->tmpidx, _state, make_automatic); ae_vector_init_copy(&dst->bufi, &src->bufi, _state, make_automatic); ae_vector_init_copy(&dst->xu, &src->xu, _state, make_automatic); ae_vector_init_copy(&dst->du, &src->du, _state, make_automatic); ae_vector_init_copy(&dst->f0, &src->f0, _state, make_automatic); ae_matrix_init_copy(&dst->j0, &src->j0, _state, make_automatic); } void _smoothnessmonitor_clear(void* _p) { smoothnessmonitor *p = (smoothnessmonitor*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->dcur); ae_vector_clear(&p->enqueuedstp); ae_vector_clear(&p->enqueuedx); ae_vector_clear(&p->enqueuedfunc); ae_matrix_clear(&p->enqueuedjac); ae_vector_clear(&p->sortedstp); ae_vector_clear(&p->sortedidx); ae_vector_clear(&p->probingf); ae_vector_clear(&p->probingsteps); ae_matrix_clear(&p->probingvalues); ae_matrix_clear(&p->probingslopes); _rcommstate_clear(&p->probingrcomm); _optguardreport_clear(&p->rep); _optguardnonc0report_clear(&p->nonc0strrep); _optguardnonc0report_clear(&p->nonc0lngrep); _optguardnonc1test0report_clear(&p->nonc1test0strrep); _optguardnonc1test0report_clear(&p->nonc1test0lngrep); _optguardnonc1test1report_clear(&p->nonc1test1strrep); _optguardnonc1test1report_clear(&p->nonc1test1lngrep); ae_vector_clear(&p->x); ae_vector_clear(&p->fi); ae_matrix_clear(&p->j); _rcommstate_clear(&p->rstateg0); ae_vector_clear(&p->xbase); ae_vector_clear(&p->fbase); ae_vector_clear(&p->fm); ae_vector_clear(&p->fc); ae_vector_clear(&p->fp); ae_vector_clear(&p->jm); ae_vector_clear(&p->jc); ae_vector_clear(&p->jp); ae_matrix_clear(&p->jbaseusr); ae_matrix_clear(&p->jbasenum); ae_vector_clear(&p->stp); ae_vector_clear(&p->bufr); ae_vector_clear(&p->f); ae_vector_clear(&p->g); ae_vector_clear(&p->deltax); ae_vector_clear(&p->tmpidx); ae_vector_clear(&p->bufi); ae_vector_clear(&p->xu); ae_vector_clear(&p->du); ae_vector_clear(&p->f0); ae_matrix_clear(&p->j0); } void _smoothnessmonitor_destroy(void* _p) { smoothnessmonitor *p = (smoothnessmonitor*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->dcur); ae_vector_destroy(&p->enqueuedstp); ae_vector_destroy(&p->enqueuedx); ae_vector_destroy(&p->enqueuedfunc); ae_matrix_destroy(&p->enqueuedjac); ae_vector_destroy(&p->sortedstp); ae_vector_destroy(&p->sortedidx); ae_vector_destroy(&p->probingf); ae_vector_destroy(&p->probingsteps); 
ae_matrix_destroy(&p->probingvalues); ae_matrix_destroy(&p->probingslopes); _rcommstate_destroy(&p->probingrcomm); _optguardreport_destroy(&p->rep); _optguardnonc0report_destroy(&p->nonc0strrep); _optguardnonc0report_destroy(&p->nonc0lngrep); _optguardnonc1test0report_destroy(&p->nonc1test0strrep); _optguardnonc1test0report_destroy(&p->nonc1test0lngrep); _optguardnonc1test1report_destroy(&p->nonc1test1strrep); _optguardnonc1test1report_destroy(&p->nonc1test1lngrep); ae_vector_destroy(&p->x); ae_vector_destroy(&p->fi); ae_matrix_destroy(&p->j); _rcommstate_destroy(&p->rstateg0); ae_vector_destroy(&p->xbase); ae_vector_destroy(&p->fbase); ae_vector_destroy(&p->fm); ae_vector_destroy(&p->fc); ae_vector_destroy(&p->fp); ae_vector_destroy(&p->jm); ae_vector_destroy(&p->jc); ae_vector_destroy(&p->jp); ae_matrix_destroy(&p->jbaseusr); ae_matrix_destroy(&p->jbasenum); ae_vector_destroy(&p->stp); ae_vector_destroy(&p->bufr); ae_vector_destroy(&p->f); ae_vector_destroy(&p->g); ae_vector_destroy(&p->deltax); ae_vector_destroy(&p->tmpidx); ae_vector_destroy(&p->bufi); ae_vector_destroy(&p->xu); ae_vector_destroy(&p->du); ae_vector_destroy(&p->f0); ae_matrix_destroy(&p->j0); } #endif #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This subroutine is used to initialize SNNLS solver. By default, empty NNLS problem is produced, but we allocated enough space to store problems with NSMax+NDMax columns and NRMax rows. It is good place to provide algorithm with initial estimate of the space requirements, although you may underestimate problem size or even pass zero estimates - in this case buffer variables will be resized automatically when you set NNLS problem. Previously allocated buffer variables are reused as much as possible. This function does not clear structure completely, it tries to preserve as much dynamically allocated memory as possible. -- ALGLIB -- Copyright 10.10.2012 by Bochkanov Sergey *************************************************************************/ void snnlsinit(ae_int_t nsmax, ae_int_t ndmax, ae_int_t nrmax, snnlssolver* s, ae_state *_state) { s->ns = 0; s->nd = 0; s->nr = 0; rmatrixsetlengthatleast(&s->densea, nrmax, ndmax, _state); rmatrixsetlengthatleast(&s->tmpca, nrmax, ndmax, _state); rvectorsetlengthatleast(&s->b, nrmax, _state); bvectorsetlengthatleast(&s->nnc, nsmax+ndmax, _state); s->debugflops = 0.0; s->debugmaxinnerits = 0; } /************************************************************************* This subroutine is used to set NNLS problem: ( [ 1 | ] [ ] [ ] )^2 ( [ 1 | ] [ ] [ ] ) min ( [ 1 | Ad ] * [ x ] - [ b ] ) s.t. x>=0 ( [ | ] [ ] [ ] ) ( [ | ] [ ] [ ] ) where: * identity matrix has NS*NS size (NS<=NR, NS can be zero) * dense matrix Ad has NR*ND size * b is NR*1 vector * x is (NS+ND)*1 vector * all elements of x are non-negative (this constraint can be removed later by calling SNNLSDropNNC() function) Previously allocated buffer variables are reused as much as possible. After you set problem, you can solve it with SNNLSSolve(). INPUT PARAMETERS: S - SNNLS solver, must be initialized with SNNLSInit() call A - array[NR,ND], dense part of the system B - array[NR], right part NS - size of the sparse part of the system, 0<=NS<=NR ND - size of the dense part of the system, ND>=0 NR - rows count, NR>0 NOTE: 1. You can have NS+ND=0, solver will correctly accept such combination and return empty array as problem solution. 
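   2. A minimal internal usage sketch (illustrative only; A, B, X are
      hypothetical locals, initialization of the structures and error
      handling are omitted) for a problem with NS=0 and a dense NRxND block:

          snnlssolver s;
          ae_vector x;
          // ...initialize A (NRxND), B (NR) and X with the usual
          //    ae_matrix/ae_vector helpers...
          snnlsinit(0, nd, nr, &s, _state);
          snnlssetproblem(&s, &a, &b, 0, nd, nr, _state);
          snnlsdropnnc(&s, 0, _state);   // optional: make X[0] sign-unconstrained
          snnlssolve(&s, &x, _state);    // on exit X has ND elements, X[i]>=0
                                         // for every constraint not dropped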
-- ALGLIB -- Copyright 10.10.2012 by Bochkanov Sergey *************************************************************************/ void snnlssetproblem(snnlssolver* s, /* Real */ ae_matrix* a, /* Real */ ae_vector* b, ae_int_t ns, ae_int_t nd, ae_int_t nr, ae_state *_state) { ae_int_t i; ae_assert(nd>=0, "SNNLSSetProblem: ND<0", _state); ae_assert(ns>=0, "SNNLSSetProblem: NS<0", _state); ae_assert(nr>0, "SNNLSSetProblem: NR<=0", _state); ae_assert(ns<=nr, "SNNLSSetProblem: NS>NR", _state); ae_assert(a->rows>=nr||nd==0, "SNNLSSetProblem: rows(A)<NR", _state); ae_assert(a->cols>=nd, "SNNLSSetProblem: cols(A)<ND", _state); ae_assert(b->cnt>=nr, "SNNLSSetProblem: length(B)<NR", _state); s->ns = ns; s->nd = nd; s->nr = nr; if( nd>0 ) { rmatrixsetlengthatleast(&s->densea, nr, nd, _state); for(i=0; i<=nr-1; i++) { ae_v_move(&s->densea.ptr.pp_double[i][0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,nd-1)); } } rvectorsetlengthatleast(&s->b, nr, _state); ae_v_move(&s->b.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,nr-1)); bvectorsetlengthatleast(&s->nnc, ns+nd, _state); for(i=0; i<=ns+nd-1; i++) { s->nnc.ptr.p_bool[i] = ae_true; } }
/************************************************************************* This subroutine drops non-negativity constraint from the problem set by SNNLSSetProblem() call. This function must be called AFTER problem is set, because each SetProblem() call resets constraints to their default state (all constraints are present). INPUT PARAMETERS: S - SNNLS solver, must be initialized with SNNLSInit() call, problem must be set with SNNLSSetProblem() call. Idx - constraint index, 0<=IDX<NS+ND -- ALGLIB -- Copyright 10.10.2012 by Bochkanov Sergey *************************************************************************/ void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state) { ae_assert(idx>=0, "SNNLSDropNNC: Idx<0", _state); ae_assert(idx<s->ns+s->nd, "SNNLSDropNNC: Idx>=NS+ND", _state); s->nnc.ptr.p_bool[idx] = ae_false; }
/************************************************************************* This subroutine is used to solve NNLS problem. INPUT PARAMETERS: S - SNNLS solver, must be initialized with SNNLSInit() call and problem must be set up with SNNLSSetProblem() call. X - possibly preallocated buffer, automatically resized if needed OUTPUT PARAMETERS: X - array[NS+ND], solution NOTE: 1. You can have NS+ND=0, solver will correctly accept such combination and return empty array as problem solution. 2. Internal field S.DebugFLOPS contains rough estimate of FLOPs used to solve problem. It can be used for debugging purposes. This field is real-valued. -- ALGLIB -- Copyright 10.10.2012 by Bochkanov Sergey *************************************************************************/ void snnlssolve(snnlssolver* s, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; ae_int_t ns; ae_int_t nd; ae_int_t nr; ae_bool wasactivation; double lambdav; double v0; double v1; double v; ae_int_t outerits; ae_int_t innerits; ae_int_t maxouterits; double xtol; double kicklength; ae_bool kickneeded; double f0; double f1; double dnrm; ae_int_t actidx; double stp; double stpmax; /* * Prepare */ ns = s->ns; nd = s->nd; nr = s->nr; s->debugflops = 0.0; /* * Handle special cases: * * NS+ND=0 * * ND=0 */ if( ns+nd==0 ) { return; } if( nd==0 ) { rvectorsetlengthatleast(x, ns, _state); for(i=0; i<=ns-1; i++) { x->ptr.p_double[i] = s->b.ptr.p_double[i]; if( s->nnc.ptr.p_bool[i] ) { x->ptr.p_double[i] = ae_maxreal(x->ptr.p_double[i], 0.0, _state); } } return; } /* * Main cycle of BLEIC-SNNLS algorithm. * Below we assume that ND>0.
*/ rvectorsetlengthatleast(x, ns+nd, _state); rvectorsetlengthatleast(&s->xn, ns+nd, _state); rvectorsetlengthatleast(&s->xp, ns+nd, _state); rvectorsetlengthatleast(&s->g, ns+nd, _state); rvectorsetlengthatleast(&s->d, ns+nd, _state); rvectorsetlengthatleast(&s->r, nr, _state); rvectorsetlengthatleast(&s->diagaa, nd, _state); rvectorsetlengthatleast(&s->regdiag, ns+nd, _state); rvectorsetlengthatleast(&s->dx, ns+nd, _state); for(i=0; i<=ns+nd-1; i++) { x->ptr.p_double[i] = 0.0; s->regdiag.ptr.p_double[i] = 1.0; } lambdav = 1.0E6*ae_machineepsilon; maxouterits = 10; outerits = 0; innerits = 0; xtol = 1.0E3*ae_machineepsilon; kicklength = ae_sqrt(ae_minrealnumber, _state); for(;;) { /* * Initial check for correctness of X */ for(i=0; i<=ns+nd-1; i++) { ae_assert(!s->nnc.ptr.p_bool[i]||ae_fp_greater_eq(x->ptr.p_double[i],(double)(0)), "SNNLS: integrity check failed", _state); } /* * Calculate gradient G and constrained descent direction D */ snnls_funcgradu(s, x, &s->r, &s->g, &f0, _state); for(i=0; i<=ns+nd-1; i++) { if( (s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],(double)(0)))&&ae_fp_greater(s->g.ptr.p_double[i],(double)(0)) ) { s->d.ptr.p_double[i] = 0.0; } else { s->d.ptr.p_double[i] = -s->g.ptr.p_double[i]; } } /* * Decide whether we need "kick" stage: special stage * that moves us away from boundary constraints which are * not strictly active (i.e. such constraints that x[i]=0.0 and d[i]>0). * * If we need kick stage, we make a kick - and restart iteration. * If not, after this block we can rely on the fact that * for all x[i]=0.0 we have d[i]=0.0 * * NOTE: we do not increase outer iterations counter here */ kickneeded = ae_false; for(i=0; i<=ns+nd-1; i++) { if( (s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0.0))&&ae_fp_greater(s->d.ptr.p_double[i],0.0) ) { kickneeded = ae_true; } } if( kickneeded ) { /* * Perform kick. * Restart. * Do not increase iterations counter. */ for(i=0; i<=ns+nd-1; i++) { if( ae_fp_eq(x->ptr.p_double[i],0.0)&&ae_fp_greater(s->d.ptr.p_double[i],0.0) ) { x->ptr.p_double[i] = x->ptr.p_double[i]+kicklength; } } continue; } /* * Newton phase * Reduce problem to constrained triangular form and perform Newton * steps with quick activation of constrants (triangular form is * updated in order to handle changed constraints). */ for(i=0; i<=ns+nd-1; i++) { s->xp.ptr.p_double[i] = x->ptr.p_double[i]; } snnls_trdprepare(s, x, &s->regdiag, lambdav, &s->trdd, &s->trda, &s->tmp0, &s->tmp1, &s->tmp2, &s->tmplq, _state); for(;;) { /* * Skip if debug limit on inner iterations count is turned on. */ if( s->debugmaxinnerits>0&&innerits>=s->debugmaxinnerits ) { break; } /* * Prepare step vector. */ snnls_funcgradu(s, x, &s->r, &s->g, &f0, _state); for(i=0; i<=ns+nd-1; i++) { s->d.ptr.p_double[i] = -s->g.ptr.p_double[i]; if( s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0.0) ) { s->d.ptr.p_double[i] = 0.0; } } snnls_trdsolve(&s->trdd, &s->trda, ns, nd, &s->d, _state); /* * Perform unconstrained trial step and compare function values. */ for(i=0; i<=ns+nd-1; i++) { s->xn.ptr.p_double[i] = x->ptr.p_double[i]+s->d.ptr.p_double[i]; } snnls_func(s, &s->xn, &f1, _state); if( ae_fp_greater_eq(f1,f0) ) { break; } /* * Calculate length of D, maximum step and component which is * activated by this step. Break if D is exactly zero. 
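* The bound on the step is the usual ratio test over non-negativity
* constrained components which decrease along D,
*
*     StpMax = min{ X[i]/(-D[i]) : NNC[i] and D[i]<0 },
*
* computed in an overflow-safe way by SafeMinPosRV(); ActIdx remembers
* the index at which the minimum is attained.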
*/ dnrm = 0.0; for(i=0; i<=ns+nd-1; i++) { dnrm = dnrm+ae_sqr(s->d.ptr.p_double[i], _state); } dnrm = ae_sqrt(dnrm, _state); actidx = -1; stpmax = 1.0E50; for(i=0; i<=ns+nd-1; i++) { if( s->nnc.ptr.p_bool[i]&&ae_fp_less(s->d.ptr.p_double[i],0.0) ) { v = stpmax; stpmax = safeminposrv(x->ptr.p_double[i], -s->d.ptr.p_double[i], stpmax, _state); if( ae_fp_less(stpmax,v) ) { actidx = i; } } } if( ae_fp_eq(dnrm,0.0) ) { break; } /* * Perform constrained step and update X * and triangular model. */ stp = ae_minreal(1.0, stpmax, _state); for(i=0; i<=ns+nd-1; i++) { v = x->ptr.p_double[i]+stp*s->d.ptr.p_double[i]; if( s->nnc.ptr.p_bool[i] ) { v = ae_maxreal(v, 0.0, _state); } s->xn.ptr.p_double[i] = v; } if( ae_fp_eq(stp,stpmax)&&actidx>=0 ) { s->xn.ptr.p_double[actidx] = 0.0; } wasactivation = ae_false; for(i=0; i<=ns+nd-1; i++) { if( ae_fp_eq(s->xn.ptr.p_double[i],0.0)&&ae_fp_neq(x->ptr.p_double[i],0.0) ) { wasactivation = ae_true; snnls_trdfixvariable(&s->trdd, &s->trda, ns, nd, i, &s->tmpcholesky, _state); } } for(i=0; i<=ns+nd-1; i++) { x->ptr.p_double[i] = s->xn.ptr.p_double[i]; } /* * Increment iterations counter. * Terminate if no constraint was activated. */ inc(&innerits, _state); if( !wasactivation ) { break; } } /* * Update outer iterations counter. * * Break if necessary: * * maximum number of outer iterations performed * * relative change in X is small enough */ inc(&outerits, _state); if( outerits>=maxouterits ) { break; } v = (double)(0); for(i=0; i<=ns+nd-1; i++) { v0 = ae_fabs(s->xp.ptr.p_double[i], _state); v1 = ae_fabs(x->ptr.p_double[i], _state); if( ae_fp_neq(v0,(double)(0))||ae_fp_neq(v1,(double)(0)) ) { v = ae_maxreal(v, ae_fabs(x->ptr.p_double[i]-s->xp.ptr.p_double[i], _state)/ae_maxreal(v0, v1, _state), _state); } } if( ae_fp_less_eq(v,xtol) ) { break; } } }
/************************************************************************* This function calculates: * residual vector R = A*x-b * unconstrained gradient vector G * function value F = 0.5*|R|^2 R and G must have at least N elements. -- ALGLIB -- Copyright 15.07.2015 by Bochkanov Sergey *************************************************************************/ static void snnls_funcgradu(snnlssolver* s, /* Real */ ae_vector* x, /* Real */ ae_vector* r, /* Real */ ae_vector* g, double* f, ae_state *_state) { ae_int_t i; ae_int_t nr; ae_int_t nd; ae_int_t ns; double v; *f = 0; nr = s->nr; nd = s->nd; ns = s->ns; *f = 0.0; for(i=0; i<=nr-1; i++) { v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &x->ptr.p_double[ns], 1, ae_v_len(0,nd-1)); if( i<ns ) { v = v+x->ptr.p_double[i]; } v = v-s->b.ptr.p_double[i]; r->ptr.p_double[i] = v; *f = *f+0.5*v*v; } for(i=0; i<=ns-1; i++) { g->ptr.p_double[i] = r->ptr.p_double[i]; } for(i=ns; i<=ns+nd-1; i++) { g->ptr.p_double[i] = 0.0; } for(i=0; i<=nr-1; i++) { v = r->ptr.p_double[i]; ae_v_addd(&g->ptr.p_double[ns], 1, &s->densea.ptr.pp_double[i][0], 1, ae_v_len(ns,ns+nd-1), v); } }
/************************************************************************* This function calculates function value F = 0.5*|R|^2 at X.
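Here R is the same residual as in the function above: R = M*x-b for the mixed system matrix M = [ I | Ad ] described in SNNLSSetProblem(), i.e. R[i] = x[i] + Ad[i,*]*xd - b[i] for rows i<NS and R[i] = Ad[i,*]*xd - b[i] for the remaining rows, where xd denotes the dense part x[NS..NS+ND-1] of the vector of variables.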
-- ALGLIB -- Copyright 15.07.2015 by Bochkanov Sergey *************************************************************************/ static void snnls_func(snnlssolver* s, /* Real */ ae_vector* x, double* f, ae_state *_state) { ae_int_t i; ae_int_t nr; ae_int_t nd; ae_int_t ns; double v; *f = 0; nr = s->nr; nd = s->nd; ns = s->ns; *f = 0.0; for(i=0; i<=nr-1; i++) { v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &x->ptr.p_double[ns], 1, ae_v_len(0,nd-1)); if( i<ns ) { v = v+x->ptr.p_double[i]; } v = v-s->b.ptr.p_double[i]; *f = *f+0.5*v*v; } }
static void snnls_trdprepare(snnlssolver* s, /* Real */ ae_vector* x, /* Real */ ae_vector* diag, double lambdav, /* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, /* Real */ ae_vector* tmp2, /* Real */ ae_matrix* tmplq, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t ns; ae_int_t nd; ae_int_t nr; double v; double cs; double sn; double r; /* * Prepare */ ns = s->ns; nd = s->nd; nr = s->nr; /* * Triangular reduction */ rvectorsetlengthatleast(trdd, ns, _state); rmatrixsetlengthatleast(trda, ns+nd, nd, _state); rmatrixsetlengthatleast(tmplq, nd, nr+nd, _state); for(i=0; i<=ns-1; i++) { /* * Apply rotation to I-th row and corresponding row of * regularizer. Here V is diagonal element of I-th row, * which is set to 1.0 or 0.0 depending on variable * status (constrained or not). */ v = 1.0; if( s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0.0) ) { v = 0.0; } generaterotation(v, lambdav, &cs, &sn, &r, _state); trdd->ptr.p_double[i] = cs*v+sn*lambdav; for(j=0; j<=nd-1; j++) { v = s->densea.ptr.pp_double[i][j]; trda->ptr.pp_double[i][j] = cs*v; tmplq->ptr.pp_double[j][i] = -sn*v; } } for(j=0; j<=nd-1; j++) { for(i=ns; i<=nr-1; i++) { tmplq->ptr.pp_double[j][i] = s->densea.ptr.pp_double[i][j]; } } for(j=0; j<=nd-1; j++) { if( s->nnc.ptr.p_bool[ns+j]&&ae_fp_eq(x->ptr.p_double[ns+j],0.0) ) { /* * Variable is constrained, entire row is set to zero. */ for(i=0; i<=nr-1; i++) { tmplq->ptr.pp_double[j][i] = 0.0; } for(i=0; i<=ns-1; i++) { trda->ptr.pp_double[i][j] = 0.0; } } } for(i=0; i<=nd-1; i++) { for(j=0; j<=nd-1; j++) { tmplq->ptr.pp_double[j][nr+i] = 0.0; } tmplq->ptr.pp_double[i][nr+i] = lambdav*diag->ptr.p_double[i]; } rvectorsetlengthatleast(tmp0, nr+nd+1, _state); rvectorsetlengthatleast(tmp1, nr+nd+1, _state); rvectorsetlengthatleast(tmp2, nr+nd+1, _state); rmatrixlqbasecase(tmplq, nd, nr+nd, tmp0, tmp1, tmp2, _state); for(i=0; i<=nd-1; i++) { if( ae_fp_less(tmplq->ptr.pp_double[i][i],0.0) ) { for(j=i; j<=nd-1; j++) { tmplq->ptr.pp_double[j][i] = -tmplq->ptr.pp_double[j][i]; } } } for(i=0; i<=nd-1; i++) { for(j=0; j<=i; j++) { trda->ptr.pp_double[ns+j][i] = tmplq->ptr.pp_double[i][j]; } } }
static void snnls_trdsolve(/* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, ae_int_t ns, ae_int_t nd, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_int_t j; double v; /* * Solve U'*y=d first. * * This section includes two parts: * * solve diagonal part of U' * * solve dense part of U' */ for(i=0; i<=ns-1; i++) { d->ptr.p_double[i] = d->ptr.p_double[i]/trdd->ptr.p_double[i]; v = d->ptr.p_double[i]; for(j=0; j<=nd-1; j++) { d->ptr.p_double[ns+j] = d->ptr.p_double[ns+j]-v*trda->ptr.pp_double[i][j]; } } for(i=0; i<=nd-1; i++) { d->ptr.p_double[ns+i] = d->ptr.p_double[ns+i]/trda->ptr.pp_double[ns+i][i]; v = d->ptr.p_double[ns+i]; for(j=i+1; j<=nd-1; j++) { d->ptr.p_double[ns+j] = d->ptr.p_double[ns+j]-v*trda->ptr.pp_double[ns+i][j]; } } /* * Solve U*x=y then.
* * This section includes two parts: * * solve trailing triangular part of U * * solve combination of diagonal and dense parts of U */ for(i=nd-1; i>=0; i--) { v = 0.0; for(j=i+1; j<=nd-1; j++) { v = v+trda->ptr.pp_double[ns+i][j]*d->ptr.p_double[ns+j]; } d->ptr.p_double[ns+i] = (d->ptr.p_double[ns+i]-v)/trda->ptr.pp_double[ns+i][i]; } for(i=ns-1; i>=0; i--) { v = 0.0; for(j=0; j<=nd-1; j++) { v = v+trda->ptr.pp_double[i][j]*d->ptr.p_double[ns+j]; } d->ptr.p_double[i] = (d->ptr.p_double[i]-v)/trdd->ptr.p_double[i]; } }
static void snnls_trdfixvariable(/* Real */ ae_vector* trdd, /* Real */ ae_matrix* trda, ae_int_t ns, ae_int_t nd, ae_int_t idx, /* Real */ ae_vector* tmp, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; double cs; double sn; double r; double v; double vv; ae_assert(ns>=0, "TRDFixVariable: integrity error", _state); ae_assert(nd>=0, "TRDFixVariable: integrity error", _state); ae_assert(ns+nd>0, "TRDFixVariable: integrity error", _state); ae_assert(idx>=0, "TRDFixVariable: integrity error", _state); ae_assert(idx<ns+nd, "TRDFixVariable: integrity error", _state); rvectorsetlengthatleast(tmp, nd, _state); if( idx<ns ) { /* * We fix variable in the diagonal part of the model; quick exit * in case the dense part is empty. */ if( nd==0 ) { trdd->ptr.p_double[idx] = 1.0; return; } for(j=0; j<=nd-1; j++) { /* * Apply first rotation */ tmp->ptr.p_double[j] = trda->ptr.pp_double[idx][j]; trda->ptr.pp_double[idx][j] = 0.0; } trdd->ptr.p_double[idx] = 1.0; for(i=0; i<=nd-1; i++) { if( ae_fp_neq(tmp->ptr.p_double[i],(double)(0)) ) { /* * Apply subsequent rotations with bottom triangular part of A */ generaterotation(trda->ptr.pp_double[ns+i][i], tmp->ptr.p_double[i], &cs, &sn, &r, _state); for(j=i; j<=nd-1; j++) { v = trda->ptr.pp_double[ns+i][j]; vv = tmp->ptr.p_double[j]; trda->ptr.pp_double[ns+i][j] = v*cs+vv*sn; tmp->ptr.p_double[j] = vv*cs-v*sn; } } } } else { /* * We fix variable in the dense part of the model. It means * that prior to fixing we have: * * ( | ) * ( D | ) * ( | ) * (-----| A ) * ( |0 ) * ( |00 ) * ( |000 ) * ( |0000 ) * ( |00000) * * then we replace idx-th column by zeros: * * ( | 0 ) * ( D | 0 ) * ( | 0 ) * (-----|A 0 A) * ( | 0 ) * ( | 0 ) * ( | 0 ) * * and append row with unit element to bottom, in order to * regularize problem * * ( | 0 ) * ( D | 0 ) * ( | 0 ) * (-----|A 0 A) * ( | 0 ) * ( | 0 ) * ( | 0 ) * (00000|00100) <- appended * * and then we nullify this row by applying rotations: * * (D 0 | ) * ( 0 | ) * ( 0 D| ) * (-----| A ) * ( | ) * ( | ) <- first rotation is applied here * ( | ) <- subsequent rotations are applied to rows below * ( 0 | 0 ) <- as result, row becomes zero * * and triangular structure is preserved.
*/ k = idx-ns; for(i=0; i<=ns+nd-1; i++) { trda->ptr.pp_double[i][k] = 0.0; } for(j=k+1; j<=nd-1; j++) { /* * Apply first rotation */ tmp->ptr.p_double[j] = trda->ptr.pp_double[idx][j]; trda->ptr.pp_double[idx][j] = 0.0; } trda->ptr.pp_double[idx][k] = 1.0; for(i=k+1; i<=nd-1; i++) { if( ae_fp_neq(tmp->ptr.p_double[i],(double)(0)) ) { /* * Apply subsequent rotations with bottom triangular part of A */ generaterotation(trda->ptr.pp_double[ns+i][i], tmp->ptr.p_double[i], &cs, &sn, &r, _state); for(j=i; j<=nd-1; j++) { v = trda->ptr.pp_double[ns+i][j]; vv = tmp->ptr.p_double[j]; trda->ptr.pp_double[ns+i][j] = v*cs+vv*sn; tmp->ptr.p_double[j] = vv*cs-v*sn; } } } } } void _snnlssolver_init(void* _p, ae_state *_state, ae_bool make_automatic) { snnlssolver *p = (snnlssolver*)_p; ae_touch_ptr((void*)p); ae_matrix_init(&p->densea, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nnc, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmpca, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmplq, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->trda, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->trdd, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->crb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagaa, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cborg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpcholesky, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->r, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->regdiag, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rdtmprowmap, 0, DT_INT, _state, make_automatic); } void _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { snnlssolver *dst = (snnlssolver*)_dst; snnlssolver *src = (snnlssolver*)_src; dst->ns = src->ns; dst->nd = src->nd; dst->nr = src->nr; ae_matrix_init_copy(&dst->densea, &src->densea, _state, make_automatic); ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic); ae_vector_init_copy(&dst->nnc, &src->nnc, _state, make_automatic); dst->debugflops = src->debugflops; dst->debugmaxinnerits = src->debugmaxinnerits; ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic); ae_matrix_init_copy(&dst->tmpca, &src->tmpca, _state, make_automatic); ae_matrix_init_copy(&dst->tmplq, &src->tmplq, _state, make_automatic); ae_matrix_init_copy(&dst->trda, &src->trda, _state, make_automatic); ae_vector_init_copy(&dst->trdd, &src->trdd, _state, make_automatic); ae_vector_init_copy(&dst->crb, &src->crb, _state, make_automatic); ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->dx, &src->dx, _state, make_automatic); ae_vector_init_copy(&dst->diagaa, &src->diagaa, _state, 
make_automatic); ae_vector_init_copy(&dst->cb, &src->cb, _state, make_automatic); ae_vector_init_copy(&dst->cx, &src->cx, _state, make_automatic); ae_vector_init_copy(&dst->cborg, &src->cborg, _state, make_automatic); ae_vector_init_copy(&dst->tmpcholesky, &src->tmpcholesky, _state, make_automatic); ae_vector_init_copy(&dst->r, &src->r, _state, make_automatic); ae_vector_init_copy(&dst->regdiag, &src->regdiag, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_vector_init_copy(&dst->rdtmprowmap, &src->rdtmprowmap, _state, make_automatic); } void _snnlssolver_clear(void* _p) { snnlssolver *p = (snnlssolver*)_p; ae_touch_ptr((void*)p); ae_matrix_clear(&p->densea); ae_vector_clear(&p->b); ae_vector_clear(&p->nnc); ae_vector_clear(&p->xn); ae_vector_clear(&p->xp); ae_matrix_clear(&p->tmpca); ae_matrix_clear(&p->tmplq); ae_matrix_clear(&p->trda); ae_vector_clear(&p->trdd); ae_vector_clear(&p->crb); ae_vector_clear(&p->g); ae_vector_clear(&p->d); ae_vector_clear(&p->dx); ae_vector_clear(&p->diagaa); ae_vector_clear(&p->cb); ae_vector_clear(&p->cx); ae_vector_clear(&p->cborg); ae_vector_clear(&p->tmpcholesky); ae_vector_clear(&p->r); ae_vector_clear(&p->regdiag); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmp2); ae_vector_clear(&p->rdtmprowmap); } void _snnlssolver_destroy(void* _p) { snnlssolver *p = (snnlssolver*)_p; ae_touch_ptr((void*)p); ae_matrix_destroy(&p->densea); ae_vector_destroy(&p->b); ae_vector_destroy(&p->nnc); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->xp); ae_matrix_destroy(&p->tmpca); ae_matrix_destroy(&p->tmplq); ae_matrix_destroy(&p->trda); ae_vector_destroy(&p->trdd); ae_vector_destroy(&p->crb); ae_vector_destroy(&p->g); ae_vector_destroy(&p->d); ae_vector_destroy(&p->dx); ae_vector_destroy(&p->diagaa); ae_vector_destroy(&p->cb); ae_vector_destroy(&p->cx); ae_vector_destroy(&p->cborg); ae_vector_destroy(&p->tmpcholesky); ae_vector_destroy(&p->r); ae_vector_destroy(&p->regdiag); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmp2); ae_vector_destroy(&p->rdtmprowmap); } #endif #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This subroutine is used to initialize active set. By default, empty N-variable model with no constraints is generated. Previously allocated buffer variables are reused as much as possible. Two use cases for this object are described below. 
CASE 1 - STEEPEST DESCENT: SASInit() repeat: SASReactivateConstraints() SASDescentDirection() SASExploreDirection() SASMoveTo() until convergence CASE 2 - PRECONDITIONED STEEPEST DESCENT: SASInit() repeat: SASReactivateConstraintsPrec() SASDescentDirectionPrec() SASExploreDirection() SASMoveTo() until convergence -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasinit(ae_int_t n, sactiveset* s, ae_state *_state) { ae_int_t i; s->n = n; s->algostate = 0; /* * Constraints */ s->constraintschanged = ae_true; s->nec = 0; s->nic = 0; rvectorsetlengthatleast(&s->bndl, n, _state); bvectorsetlengthatleast(&s->hasbndl, n, _state); rvectorsetlengthatleast(&s->bndu, n, _state); bvectorsetlengthatleast(&s->hasbndu, n, _state); for(i=0; i<=n-1; i++) { s->bndl.ptr.p_double[i] = _state->v_neginf; s->bndu.ptr.p_double[i] = _state->v_posinf; s->hasbndl.ptr.p_bool[i] = ae_false; s->hasbndu.ptr.p_bool[i] = ae_false; } /* * current point, scale */ s->hasxc = ae_false; rvectorsetlengthatleast(&s->xc, n, _state); rvectorsetlengthatleast(&s->s, n, _state); rvectorsetlengthatleast(&s->h, n, _state); for(i=0; i<=n-1; i++) { s->xc.ptr.p_double[i] = 0.0; s->s.ptr.p_double[i] = 1.0; s->h.ptr.p_double[i] = 1.0; } /* * Other */ rvectorsetlengthatleast(&s->unitdiagonal, n, _state); for(i=0; i<=n-1; i++) { s->unitdiagonal.ptr.p_double[i] = 1.0; } }
/************************************************************************* This function sets scaling coefficients for SAS object. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function During orthogonalization phase, scale is used to calculate drop tolerances (whether vector is significantly non-zero or not). INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sassetscale(sactiveset* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(state->algostate==0, "SASSetScale: you may change scale only in modification mode", _state); ae_assert(s->cnt>=state->n, "SASSetScale: Length(S)<N", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "SASSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "SASSetScale: S contains zero elements", _state); } for(i=0; i<=state->n-1; i++) { state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } }
/************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE 1: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
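A short sketch of the intended call (assuming State is a SAS object which is still in modification mode, i.e. right after SASInit(), and D is an ae_vector of length at least N=2 allocated by the caller):

    d.ptr.p_double[0] = 4.0;
    d.ptr.p_double[1] = 1.0;
    sassetprecdiag(&state, &d, _state);

The values above are the Hessian diagonal itself, not its inverse; the object stores them in State.H and uses them in the *Prec() functions below.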
-- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sassetprecdiag(sactiveset* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(state->algostate==0, "SASSetPrecDiag: you may change preconditioner only in modification mode", _state); ae_assert(d->cnt>=state->n, "SASSetPrecDiag: D is too short", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "SASSetPrecDiag: D contains infinite or NAN elements", _state); ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "SASSetPrecDiag: D contains non-positive elements", _state); } for(i=0; i<=state->n-1; i++) { state->h.ptr.p_double[i] = d->ptr.p_double[i]; } }
/************************************************************************* This function sets/changes boundary constraints. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sassetbc(sactiveset* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; ae_assert(state->algostate==0, "SASSetBC: you may change constraints only in modification mode", _state); n = state->n; ae_assert(bndl->cnt>=n, "SASSetBC: Length(BndL)<N", _state); ae_assert(bndu->cnt>=n, "SASSetBC: Length(BndU)<N", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "SASSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "SASSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } state->constraintschanged = ae_true; }
/************************************************************************* This function sets linear constraints for SAS object. Linear constraints are inactive by default (after initial creation). INPUT PARAMETERS: State - SAS structure C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0 NOTE 1: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about Epsilon in magnitude) due to rounding errors * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints.
If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void sassetlc(sactiveset* state, /* Real */ ae_matrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_int_t n; ae_int_t i; ae_assert(state->algostate==0, "SASSetLC: you may change constraints only in modification mode", _state); n = state->n; /* * First, check for errors in the inputs */ ae_assert(k>=0, "SASSetLC: K<0", _state); ae_assert(c->cols>=n+1||k==0, "SASSetLC: Cols(C)<N+1", _state); ae_assert(c->rows>=k, "SASSetLC: Rows(C)<K", _state); ae_assert(ct->cnt>=k, "SASSetLC: Length(CT)<K", _state); if( k==0 ) { state->nec = 0; state->nic = 0; state->constraintschanged = ae_true; return; } /* * Equality constraints are stored first, in the upper * NEC rows of State.CLEIC matrix. Inequality constraints * are stored in the next NIC rows. * * NOTE: we convert inequality constraints to the form * A*x<=b before copying them. */ rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); state->nec = 0; state->nic = 0; for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]==0 ) { ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); state->nec = state->nec+1; } } for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]!=0 ) { if( ct->ptr.p_int[i]>0 ) { ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } else { ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } state->nic = state->nic+1; } } /* * Mark state as changed */ state->constraintschanged = ae_true; }
/************************************************************************* Another variation of SASSetLC(), which accepts linear constraints using another representation. Linear constraints are inactive by default (after initial creation). INPUT PARAMETERS: State - SAS structure CLEIC - linear constraints, array[NEC+NIC,N+1]. Each row of C represents one constraint: * first N elements correspond to coefficients, * last element corresponds to the right part. First NEC rows store equality constraints, next NIC - are inequality ones. All elements of C (including right part) must be finite. NEC - number of equality constraints, NEC>=0 NIC - number of inequality constraints, NIC>=0 NOTE 1: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about Epsilon in magnitude) due to rounding errors * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points).
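A small layout example (an assumed illustration with N=2): to pass the constraints x0+x1=1 and x0<=0.5 one sets NEC=1, NIC=1 and

    CLEIC = ( 1  1  1.0 )      first row:  equality    1*x0+1*x1  = 1.0
            ( 1  0  0.5 )      second row: inequality  1*x0+0*x1 <= 0.5

i.e. inequality rows are expected in the same "C*x <= right part" form into which SASSetLC() converts them internally.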
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void sassetlcx(sactiveset* state, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; ae_assert(state->algostate==0, "SASSetLCX: you may change constraints only in modification mode", _state); n = state->n; /* * First, check for errors in the inputs */ ae_assert(nec>=0, "SASSetLCX: NEC<0", _state); ae_assert(nic>=0, "SASSetLCX: NIC<0", _state); ae_assert(cleic->cols>=n+1||nec+nic==0, "SASSetLCX: Cols(CLEIC)<N+1", _state); ae_assert(cleic->rows>=nec+nic, "SASSetLCX: Rows(CLEIC)<NEC+NIC", _state); rmatrixsetlengthatleast(&state->cleic, nec+nic, n+1, _state); state->nec = nec; state->nic = nic; for(i=0; i<=nec+nic-1; i++) { for(j=0; j<=n; j++) { state->cleic.ptr.pp_double[i][j] = cleic->ptr.pp_double[i][j]; } } /* * Mark state as changed */ state->constraintschanged = ae_true; }
/************************************************************************* This subroutine turns on optimization mode: 1. feasibility in X is enforced (in case X=S.XC and constraints have not changed, algorithm just uses X without any modifications at all) 2. constraints are marked as "candidate" or "inactive" INPUT PARAMETERS: S - active set object X - initial point (candidate), array[N]. It is expected that X contains only finite values (we do not check it). OUTPUT PARAMETERS: S - state is changed X - initial point can be changed to enforce feasibility RESULT: True in case feasible point was found (mode was changed to "optimization") False in case no feasible point was found (mode was not changed) -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ ae_bool sasstartoptimization(sactiveset* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; ae_int_t j; double v; double v0; double v1; double vv; double vc; double vx; ae_bool result; ae_assert(state->algostate==0, "SASStartOptimization: already in optimization mode", _state); result = ae_false; n = state->n; nec = state->nec; nic = state->nic; /* * Enforce feasibility and calculate set of "candidate"/"active" constraints. * Always active equality constraints are marked as "active", all other constraints * are marked as "candidate". */ ivectorsetlengthatleast(&state->cstatus, n+nec+nic, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { return result; } } } ae_v_move(&state->xc.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); if( state->nec+state->nic>0 ) { /* * General linear constraints are present. * Try to use fast code for feasible initial point with modest * memory requirements.
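* The quick code below first clips X to the box constraints (result is
* stored in Tmp0) and then checks every linear constraint against a
* scale-aware tolerance: with V = C_i*Tmp0 - b_i and
* VV = 1000*MachineEpsilon*|inv(S)*Tmp0|*|S*C_i|, an equality row must
* satisfy |V|<VV and an inequality row must satisfy V<VV; only if all
* rows pass is Tmp0 accepted as the feasible initial point.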
*/ rvectorsetlengthatleast(&state->tmp0, n, _state); state->feasinitpt = ae_true; for(i=0; i<=n-1; i++) { state->tmp0.ptr.p_double[i] = x->ptr.p_double[i]; state->cstatus.ptr.p_int[i] = -1; if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->tmp0.ptr.p_double[i] = state->bndl.ptr.p_double[i]; state->cstatus.ptr.p_int[i] = 1; continue; } if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->tmp0.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 0; state->tmp0.ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->tmp0.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 0; state->tmp0.ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } for(i=0; i<=state->nec+state->nic-1; i++) { v = -state->cleic.ptr.pp_double[i][n]; v0 = (double)(0); v1 = (double)(0); for(j=0; j<=n-1; j++) { vx = state->tmp0.ptr.p_double[j]/state->s.ptr.p_double[j]; vc = state->cleic.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; v = v+vx*vc; v0 = v0+ae_sqr(vx, _state); v1 = v1+ae_sqr(vc, _state); } vv = ae_sqrt(v0, _state)*ae_sqrt(v1, _state)*1000*ae_machineepsilon; if( i<state->nec ) { state->cstatus.ptr.p_int[n+i] = 1; state->feasinitpt = state->feasinitpt&&ae_fp_less(ae_fabs(v, _state),vv); } else { state->feasinitpt = state->feasinitpt&&ae_fp_less(v,vv); if( ae_fp_less(v,-vv) ) { state->cstatus.ptr.p_int[n+i] = -1; } else { state->cstatus.ptr.p_int[n+i] = 0; } } } if( state->feasinitpt ) { ae_v_move(&state->xc.ptr.p_double[0], 1, &state->tmp0.ptr.p_double[0], 1, ae_v_len(0,n-1)); } /* * Fast code failed? Use general code with ~(N+NIC)^2 memory requirements */ if( !state->feasinitpt ) { rvectorsetlengthatleast(&state->tmp0, n, _state); rvectorsetlengthatleast(&state->tmpfeas, n+state->nic, _state); rmatrixsetlengthatleast(&state->tmpm0, state->nec+state->nic, n+state->nic+1, _state); for(i=0; i<=state->nec+state->nic-1; i++) { ae_v_move(&state->tmpm0.ptr.pp_double[i][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); for(j=n; j<=n+state->nic-1; j++) { state->tmpm0.ptr.pp_double[i][j] = (double)(0); } if( i>=state->nec ) { state->tmpm0.ptr.pp_double[i][n+i-state->nec] = 1.0; } state->tmpm0.ptr.pp_double[i][n+state->nic] = state->cleic.ptr.pp_double[i][n]; } ae_v_move(&state->tmpfeas.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=state->nic-1; i++) { v = ae_v_dotproduct(&state->cleic.ptr.pp_double[i+state->nec][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->tmpfeas.ptr.p_double[i+n] = ae_maxreal(state->cleic.ptr.pp_double[i+state->nec][n]-v, 0.0, _state); } if( !findfeasiblepoint(&state->tmpfeas, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, state->nic, &state->tmpm0, state->nec+state->nic, 1.0E-6, &i, &j, _state) ) { return result; } ae_v_move(&state->xc.ptr.p_double[0], 1, &state->tmpfeas.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 1; continue; } if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))||(state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i])) ) { state->cstatus.ptr.p_int[i] = 0; continue; } state->cstatus.ptr.p_int[i] = -1; } for(i=0; i<=state->nec-1; i++) {
state->cstatus.ptr.p_int[n+i] = 1; } for(i=0; i<=state->nic-1; i++) { if( ae_fp_eq(state->tmpfeas.ptr.p_double[n+i],(double)(0)) ) { state->cstatus.ptr.p_int[n+state->nec+i] = 0; } else { state->cstatus.ptr.p_int[n+state->nec+i] = -1; } } } } else { /* * Only box constraints are present, quick code can be used */ for(i=0; i<=n-1; i++) { state->cstatus.ptr.p_int[i] = -1; if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 1; state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; continue; } if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; state->cstatus.ptr.p_int[i] = 0; continue; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->bndu.ptr.p_double[i]; state->cstatus.ptr.p_int[i] = 0; continue; } } state->feasinitpt = ae_true; } /* * Change state, allocate temporaries */ result = ae_true; state->algostate = 1; state->basisisready = ae_false; state->hasxc = ae_true; return result; } /************************************************************************* This function explores search direction and calculates bound for step as well as information for activation of constraints. INPUT PARAMETERS: State - SAS structure which stores current point and all other active set related information D - descent direction to explore OUTPUT PARAMETERS: StpMax - upper limit on step length imposed by yet inactive constraints. Can be zero in case some constraints can be activated by zero step. Equal to some large value in case step is unlimited. CIdx - -1 for unlimited step, in [0,N+NEC+NIC) in case of limited step. VVal - value which is assigned to X[CIdx] during activation. For CIdx<0 or CIdx>=N some dummy value is assigned to this parameter. 
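A sketch of one pass of the loop shown in the SASInit() comments (D is assumed to hold a direction produced by SASConstrainedDescent(), XN is a caller-allocated ae_vector of length N):

    sasexploredirection(&state, &d, &stpmax, &cidx, &vval, _state);
    stp = ae_minreal(1.0, stpmax, _state);
    for(i=0; i<=n-1; i++)
        xn.ptr.p_double[i] = state.xc.ptr.p_double[i]+stp*d.ptr.p_double[i];
    sasmoveto(&state, &xn, ae_fp_eq(stp,stpmax), cidx, vval, _state);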
*************************************************************************/ void sasexploredirection(sactiveset* state, /* Real */ ae_vector* d, double* stpmax, ae_int_t* cidx, double* vval, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; double prevmax; double vc; double vd; *stpmax = 0; *cidx = 0; *vval = 0; ae_assert(state->algostate==1, "SASExploreDirection: is not in optimization mode", _state); n = state->n; nec = state->nec; nic = state->nic; *cidx = -1; *vval = (double)(0); *stpmax = 1.0E50; for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]<=0 ) { ae_assert(!state->hasbndl.ptr.p_bool[i]||ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]), "SASExploreDirection: internal error - infeasible X", _state); ae_assert(!state->hasbndu.ptr.p_bool[i]||ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]), "SASExploreDirection: internal error - infeasible X", _state); if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(d->ptr.p_double[i],(double)(0)) ) { prevmax = *stpmax; *stpmax = safeminposrv(state->xc.ptr.p_double[i]-state->bndl.ptr.p_double[i], -d->ptr.p_double[i], *stpmax, _state); if( ae_fp_less(*stpmax,prevmax) ) { *cidx = i; *vval = state->bndl.ptr.p_double[i]; } } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(d->ptr.p_double[i],(double)(0)) ) { prevmax = *stpmax; *stpmax = safeminposrv(state->bndu.ptr.p_double[i]-state->xc.ptr.p_double[i], d->ptr.p_double[i], *stpmax, _state); if( ae_fp_less(*stpmax,prevmax) ) { *cidx = i; *vval = state->bndu.ptr.p_double[i]; } } } } for(i=nec; i<=nec+nic-1; i++) { if( state->cstatus.ptr.p_int[n+i]<=0 ) { vc = ae_v_dotproduct(&state->cleic.ptr.pp_double[i][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); vc = vc-state->cleic.ptr.pp_double[i][n]; vd = ae_v_dotproduct(&state->cleic.ptr.pp_double[i][0], 1, &d->ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_fp_less_eq(vd,(double)(0)) ) { continue; } if( ae_fp_less(vc,(double)(0)) ) { /* * XC is strictly feasible with respect to I-th constraint, * we can perform non-zero step because there is non-zero distance * between XC and bound. */ prevmax = *stpmax; *stpmax = safeminposrv(-vc, vd, *stpmax, _state); if( ae_fp_less(*stpmax,prevmax) ) { *cidx = n+i; } } else { /* * XC is at the boundary (or slightly beyond it), and step vector * points beyond the boundary. * * The only thing we can do is to perform zero step and activate * I-th constraint. */ *stpmax = (double)(0); *cidx = n+i; } } } } /************************************************************************* This subroutine moves current point to XN, which can be: a) point in the direction previously explored with SASExploreDirection() function (in this case NeedAct/CIdx/CVal are used) b) point in arbitrary direction, not necessarily previously checked with SASExploreDirection() function. Step may activate one constraint. It is assumed than XN is approximately feasible (small error as large as several ulps is possible). Strict feasibility with respect to bound constraints is enforced during activation, feasibility with respect to general linear constraints is not enforced. This function activates boundary constraints, such that both is True: 1) XC[I] is not at the boundary 2) XN[I] is at the boundary or beyond it INPUT PARAMETERS: S - active set object XN - new point. NeedAct - True in case one constraint needs activation CIdx - index of constraint, in [0,N+NEC+NIC). Ignored if NeedAct is false. This value is calculated by SASExploreDirection(). 
CVal - for CIdx in [0,N) this field stores value which is assigned to XC[CIdx] during activation. CVal is ignored in other cases. This value is calculated by SASExploreDirection(). OUTPUT PARAMETERS: S - current point and list of active constraints are changed. RESULT: >0, in case at least one inactive non-candidate constraint was activated =0, in case only "candidate" constraints were activated <0, in case no constraints were activated by the step NOTE: in general case State.XC<>XN because activation of constraints may slightly change current point (to enforce feasibility). -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ ae_int_t sasmoveto(sactiveset* state, /* Real */ ae_vector* xn, ae_bool needact, ae_int_t cidx, double cval, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; ae_bool wasactivation; ae_int_t result; ae_assert(state->algostate==1, "SASMoveTo: is not in optimization mode", _state); n = state->n; nec = state->nec; nic = state->nic; /* * Save previous state, update current point */ rvectorsetlengthatleast(&state->mtx, n, _state); ivectorsetlengthatleast(&state->mtas, n+nec+nic, _state); for(i=0; i<=n-1; i++) { state->mtx.ptr.p_double[i] = state->xc.ptr.p_double[i]; state->xc.ptr.p_double[i] = xn->ptr.p_double[i]; } for(i=0; i<=n+nec+nic-1; i++) { state->mtas.ptr.p_int[i] = state->cstatus.ptr.p_int[i]; } /* * Activate constraints */ bvectorsetlengthatleast(&state->mtnew, n+nec+nic, _state); wasactivation = ae_false; for(i=0; i<=n+nec+nic-1; i++) { state->mtnew.ptr.p_bool[i] = ae_false; } if( needact ) { /* * Activation */ ae_assert(cidx>=0&&cidx<n+nec+nic, "SASMoveTo: incorrect CIdx", _state); if( cidx<n ) { state->xc.ptr.p_double[cidx] = cval; } state->cstatus.ptr.p_int[cidx] = 1; state->mtnew.ptr.p_bool[cidx] = ae_true; wasactivation = ae_true; } for(i=0; i<=n-1; i++) { /* * Post-check (some constraints may be activated because of numerical errors) */ if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_neq(state->xc.ptr.p_double[i],state->mtx.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; state->cstatus.ptr.p_int[i] = 1; state->mtnew.ptr.p_bool[i] = ae_true; wasactivation = ae_true; } if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_neq(state->xc.ptr.p_double[i],state->mtx.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->bndu.ptr.p_double[i]; state->cstatus.ptr.p_int[i] = 1; state->mtnew.ptr.p_bool[i] = ae_true; wasactivation = ae_true; } } /* * Determine return status: * * -1 in case no constraints were activated * * 0 in case only "candidate" constraints were activated * * +1 in case at least one "non-candidate" constraint was activated */ if( wasactivation ) { /* * Step activated one/several constraints, but sometimes it is spurious * activation - RecalculateConstraints() tells us that constraint is * inactive (negative Lagrange multiplier), but step activates it * because of numerical noise. * * This block of code checks whether step activated truly new constraints * (ones which were not in the active set at the solution): * * * for non-boundary constraint it is enough to check that previous value * of CStatus[i] is negative (=far from boundary), and new one is * positive (=we are at the boundary, constraint is activated). * * * for boundary constraints previous criterion won't work.
Each variable * has two constraints, and simply checking their status is not enough - * we have to correctly identify cases when we leave one boundary * (PrevActiveSet[i]=0) and move to another boundary (CStatus[i]>0). * Such cases can be identified if we compare previous X with new X. * * In case only "candidate" constraints were activated, result variable * is set to 0. In case at least one new constraint was activated, result * is set to 1. */ result = 0; for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]>0&&ae_fp_neq(state->xc.ptr.p_double[i],state->mtx.ptr.p_double[i]) ) { result = 1; } } for(i=n; i<=n+state->nec+state->nic-1; i++) { if( state->mtas.ptr.p_int[i]<0&&state->cstatus.ptr.p_int[i]>0 ) { result = 1; } } } else { /* * No activation, return -1 */ result = -1; } /* * Update basis */ sasappendtobasis(state, &state->mtnew, _state); return result; }
/************************************************************************* This subroutine performs immediate activation of one constraint: * "immediate" means that we do not have to move to activate it * in case boundary constraint is activated, we enforce current point to be exactly at the boundary INPUT PARAMETERS: S - active set object CIdx - index of constraint, in [0,N+NEC+NIC). This value is calculated by SASExploreDirection(). CVal - for CIdx in [0,N) this field stores value which is assigned to XC[CIdx] during activation. CVal is ignored in other cases. This value is calculated by SASExploreDirection(). -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasimmediateactivation(sactiveset* state, ae_int_t cidx, double cval, ae_state *_state) { ae_int_t i; ae_assert(state->algostate==1, "SASMoveTo: is not in optimization mode", _state); if( cidx<state->n ) { state->xc.ptr.p_double[cidx] = cval; } state->cstatus.ptr.p_int[cidx] = 1; bvectorsetlengthatleast(&state->mtnew, state->n+state->nec+state->nic, _state); for(i=0; i<=state->n+state->nec+state->nic-1; i++) { state->mtnew.ptr.p_bool[i] = ae_false; } state->mtnew.ptr.p_bool[cidx] = ae_true; sasappendtobasis(state, &state->mtnew, _state); }
/************************************************************************* This subroutine calculates descent direction subject to current active set. INPUT PARAMETERS: S - active set object G - array[N], gradient D - possibly preallocated buffer; automatically resized if needed. OUTPUT PARAMETERS: D - descent direction projected onto current active set. Components of D which correspond to active boundary constraints are forced to be exactly zero. In case D is non-zero, it is normalized to have unit norm. NOTE: in case active set has N active constraints (or more), descent direction is forced to be exactly zero. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasconstraineddescent(sactiveset* state, /* Real */ ae_vector* g, /* Real */ ae_vector* d, ae_state *_state) { ae_assert(state->algostate==1, "SASConstrainedDescent: is not in optimization mode", _state); sasrebuildbasis(state, _state); sactivesets_constraineddescent(state, g, &state->unitdiagonal, &state->idensebatch, ae_true, d, _state); }
/************************************************************************* This subroutine calculates preconditioned descent direction subject to current active set.
INPUT PARAMETERS: S - active set object G - array[N], gradient D - possibly prealocated buffer; automatically resized if needed. OUTPUT PARAMETERS: D - descent direction projected onto current active set. Components of D which correspond to active boundary constraints are forced to be exactly zero. In case D is non-zero, it is normalized to have unit norm. NOTE: in case active set has N active constraints (or more), descent direction is forced to be exactly zero. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasconstraineddescentprec(sactiveset* state, /* Real */ ae_vector* g, /* Real */ ae_vector* d, ae_state *_state) { ae_assert(state->algostate==1, "SASConstrainedDescentPrec: is not in optimization mode", _state); sasrebuildbasis(state, _state); sactivesets_constraineddescent(state, g, &state->h, &state->pdensebatch, ae_true, d, _state); } /************************************************************************* This subroutine calculates projection of direction vector to current active set. INPUT PARAMETERS: S - active set object D - array[N], direction OUTPUT PARAMETERS: D - direction projected onto current active set. Components of D which correspond to active boundary constraints are forced to be exactly zero. NOTE: in case active set has N active constraints (or more), descent direction is forced to be exactly zero. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasconstraineddirection(sactiveset* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(state->algostate==1, "SASConstrainedAntigradientPrec: is not in optimization mode", _state); sasrebuildbasis(state, _state); sactivesets_constraineddescent(state, d, &state->unitdiagonal, &state->idensebatch, ae_false, &state->cdtmp, _state); for(i=0; i<=state->n-1; i++) { d->ptr.p_double[i] = -state->cdtmp.ptr.p_double[i]; } } /************************************************************************* This subroutine calculates product of direction vector and preconditioner multiplied subject to current active set. INPUT PARAMETERS: S - active set object D - array[N], direction OUTPUT PARAMETERS: D - preconditioned direction projected onto current active set. Components of D which correspond to active boundary constraints are forced to be exactly zero. NOTE: in case active set has N active constraints (or more), descent direction is forced to be exactly zero. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasconstraineddirectionprec(sactiveset* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(state->algostate==1, "SASConstrainedAntigradientPrec: is not in optimization mode", _state); sasrebuildbasis(state, _state); sactivesets_constraineddescent(state, d, &state->h, &state->pdensebatch, ae_false, &state->cdtmp, _state); for(i=0; i<=state->n-1; i++) { d->ptr.p_double[i] = -state->cdtmp.ptr.p_double[i]; } } /************************************************************************* This subroutine performs correction of some (possibly infeasible) point with respect to a) current active set, b) all boundary constraints, both active and inactive: 0) we calculate L1 penalty term for violation of active linear constraints (one which is returned by SASActiveLCPenalty1() function). 
1) first, it performs projection (orthogonal with respect to scale matrix S) of X into current active set: X -> X1. 2) next, we perform projection with respect to ALL boundary constraints which are violated at X1: X1 -> X2. 3) X is replaced by X2. The idea is that this function can preserve and enforce feasibility during optimization, and additional penalty parameter can be used to prevent algo from leaving feasible set because of rounding errors. INPUT PARAMETERS: S - active set object X - array[N], candidate point OUTPUT PARAMETERS: X - "improved" candidate point: a) feasible with respect to all boundary constraints b) feasibility with respect to active set is retained at good level. Penalty - penalty term, which can be added to function value if user wants to penalize violation of constraints (recommended). NOTE: this function is not intended to find exact projection (i.e. best approximation) of X into feasible set. It just improves situation a bit. Regular use of this function will help you to retain feasibility - if you already have something to start with and constrain your steps is such way that the only source of infeasibility are roundoff errors. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sascorrection(sactiveset* state, /* Real */ ae_vector* x, double* penalty, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t n; double v; *penalty = 0; ae_assert(state->algostate==1, "SASCorrection: is not in optimization mode", _state); sasrebuildbasis(state, _state); n = state->n; rvectorsetlengthatleast(&state->corrtmp, n, _state); /* * Calculate penalty term. */ *penalty = sasactivelcpenalty1(state, x, _state); /* * Perform projection 1. * * This projecton is given by: * * x_proj = x - S*S*As'*(As*x-b) * * where x is original x before projection, S is a scale matrix, * As is a matrix of equality constraints (active set) which were * orthogonalized with respect to inner product given by S (i.e. we * have As*S*S'*As'=I), b is a right part of the orthogonalized * constraints. * * NOTE: you can verify that x_proj is strictly feasible w.r.t. * active set by multiplying it by As - you will get * As*x_proj = As*x - As*x + b = b. * * This formula for projection can be obtained by solving * following minimization problem. * * min ||inv(S)*(x_proj-x)||^2 s.t. 
As*x_proj=b * * NOTE: we apply sparse batch by examining CStatus[]; it is guaranteed * to contain sparse batch, but avoids roundoff errors associated * with the fact that some box constraints were moved to sparse * storage * */ ae_v_move(&state->corrtmp.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=state->densebatchsize-1; i++) { v = -state->sdensebatch.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = v+state->sdensebatch.ptr.pp_double[i][j]*state->corrtmp.ptr.p_double[j]; } for(j=0; j<=n-1; j++) { state->corrtmp.ptr.p_double[j] = state->corrtmp.ptr.p_double[j]-v*state->sdensebatch.ptr.pp_double[i][j]*ae_sqr(state->s.ptr.p_double[j], _state); } } for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]>0 ) { state->corrtmp.ptr.p_double[i] = state->xc.ptr.p_double[i]; } } /* * Perform projection 2 */ for(i=0; i<=n-1; i++) { x->ptr.p_double[i] = state->corrtmp.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(x->ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(x->ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } } /************************************************************************* This subroutine returns L1 penalty for violation of active general linear constraints (violation of boundary or inactive linear constraints is not added to penalty). Penalty term is equal to: Penalty = SUM( Abs((C_i*x-R_i)/Alpha_i) ) Here: * summation is performed for I=0...NEC+NIC-1, CStatus[N+I]>0 (only for rows of CLEIC which are in active set) * C_i is I-th row of CLEIC * R_i is corresponding right part * S is a scale matrix * Alpha_i = ||S*C_i|| - is a scaling coefficient which "normalizes" I-th summation term according to its scale. INPUT PARAMETERS: S - active set object X - array[N], candidate point -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ double sasactivelcpenalty1(sactiveset* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t n; ae_int_t nec; ae_int_t nic; double v; double alpha; double p; double result; ae_assert(state->algostate==1, "SASActiveLCPenalty1: is not in optimization mode", _state); sasrebuildbasis(state, _state); n = state->n; nec = state->nec; nic = state->nic; /* * Calculate penalty term. */ result = (double)(0); for(i=0; i<=nec+nic-1; i++) { if( state->cstatus.ptr.p_int[n+i]>0 ) { alpha = (double)(0); p = -state->cleic.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = state->cleic.ptr.pp_double[i][j]; p = p+v*x->ptr.p_double[j]; alpha = alpha+ae_sqr(v*state->s.ptr.p_double[j], _state); } alpha = ae_sqrt(alpha, _state); if( ae_fp_neq(alpha,(double)(0)) ) { result = result+ae_fabs(p/alpha, _state); } } } return result; } /************************************************************************* This subroutine calculates scaled norm of vector after projection onto subspace of active constraints. Most often this function is used to test stopping conditions. 
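For example, within an optimization loop one may stop as soon as the scaled projected gradient norm becomes small (G is an assumed gradient vector, EpsG an assumed tolerance):

    if( ae_fp_less_eq(sasscaledconstrainednorm(&state, &g, _state),epsg) )
        break;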
INPUT PARAMETERS: S - active set object D - vector whose norm is calculated RESULT: Vector norm (after projection and scaling) NOTE: projection is performed first, scaling is performed after projection NOTE: if we have N active constraints, zero value (exact zero) is returned -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ double sasscaledconstrainednorm(sactiveset* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_int_t n; double v; double result; ae_assert(state->algostate==1, "SASMoveTo: is not in optimization mode", _state); n = state->n; rvectorsetlengthatleast(&state->scntmp, n, _state); /* * Prepare basis (if needed) */ sasrebuildbasis(state, _state); /* * Calculate descent direction */ if( state->sparsebatchsize+state->densebatchsize>=n ) { /* * Quick exit if number of active constraints is N or larger */ result = 0.0; return result; } for(i=0; i<=n-1; i++) { state->scntmp.ptr.p_double[i] = d->ptr.p_double[i]; } for(i=0; i<=state->densebatchsize-1; i++) { v = ae_v_dotproduct(&state->idensebatch.ptr.pp_double[i][0], 1, &state->scntmp.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_subd(&state->scntmp.ptr.p_double[0], 1, &state->idensebatch.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]>0 ) { state->scntmp.ptr.p_double[i] = (double)(0); } } v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->s.ptr.p_double[i]*state->scntmp.ptr.p_double[i], _state); } result = ae_sqrt(v, _state); return result; } /************************************************************************* This subroutine turns off optimization mode. INPUT PARAMETERS: S - active set object OUTPUT PARAMETERS: S - state is changed NOTE: this function can be called many times for optimizer which was already stopped. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ void sasstopoptimization(sactiveset* state, ae_state *_state) { state->algostate = 0; } /************************************************************************* This function recalculates constraints - activates and deactivates them according to gradient value at current point. Algorithm assumes that we want to make steepest descent step from current point; constraints are activated and deactivated in such way that we won't violate any constraint by steepest descent step. After call to this function active set is ready to try steepest descent step (SASDescentDirection-SASExploreDirection-SASMoveTo). Only already "active" and "candidate" elements of ActiveSet are examined; constraints which are not active are not examined. INPUT PARAMETERS: State - active set object GC - array[N], gradient at XC OUTPUT PARAMETERS: State - active set object, with new set of constraint -- ALGLIB -- Copyright 26.09.2012 by Bochkanov Sergey *************************************************************************/ void sasreactivateconstraints(sactiveset* state, /* Real */ ae_vector* gc, ae_state *_state) { ae_assert(state->algostate==1, "SASReactivateConstraints: must be in optimization mode", _state); sactivesets_reactivateconstraints(state, gc, &state->unitdiagonal, _state); } /************************************************************************* This function recalculates constraints - activates and deactivates them according to gradient value at current point. 
Algorithm assumes that we want to make Quasi-Newton step from current point with diagonal Quasi-Newton matrix H. Constraints are activated and deactivated in such way that we won't violate any constraint by step. After call to this function active set is ready to try preconditioned steepest descent step (SASDescentDirection-SASExploreDirection-SASMoveTo). Only already "active" and "candidate" elements of ActiveSet are examined; constraints which are not active are not examined. INPUT PARAMETERS: State - active set object GC - array[N], gradient at XC OUTPUT PARAMETERS: State - active set object, with new set of constraint -- ALGLIB -- Copyright 26.09.2012 by Bochkanov Sergey *************************************************************************/ void sasreactivateconstraintsprec(sactiveset* state, /* Real */ ae_vector* gc, ae_state *_state) { ae_assert(state->algostate==1, "SASReactivateConstraintsPrec: must be in optimization mode", _state); sactivesets_reactivateconstraints(state, gc, &state->h, _state); } /************************************************************************* This function builds three orthonormal basises for current active set: * P-orthogonal one, which is orthogonalized with inner product (x,y) = x'*P*y, where P=inv(H) is current preconditioner * S-orthogonal one, which is orthogonalized with inner product (x,y) = x'*S'*S*y, where S is diagonal scaling matrix * I-orthogonal one, which is orthogonalized with standard dot product NOTE: all sets of orthogonal vectors are guaranteed to have same size. P-orthogonal basis is built first, I/S-orthogonal basises are forced to have same number of vectors as P-orthogonal one (padded by zero vectors if needed). NOTE: this function tracks changes in active set; first call will result in reorthogonalization INPUT PARAMETERS: State - active set object H - diagonal preconditioner, H[i]>0 OUTPUT PARAMETERS: State - active set object with new basis -- ALGLIB -- Copyright 20.06.2012 by Bochkanov Sergey *************************************************************************/ void sasrebuildbasis(sactiveset* state, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; ae_int_t j; ae_bool hasactivelin; ae_int_t candidatescnt; double v; double vv; double vmax; ae_int_t kmax; if( state->basisisready ) { return; } n = state->n; nec = state->nec; nic = state->nic; rvectorsetlengthatleast(&state->tmp0, n, _state); rvectorsetlengthatleast(&state->tmpprodp, n, _state); rvectorsetlengthatleast(&state->tmpprods, n, _state); rvectorsetlengthatleast(&state->tmpcp, n+1, _state); rvectorsetlengthatleast(&state->tmpcs, n+1, _state); rvectorsetlengthatleast(&state->tmpci, n+1, _state); rmatrixsetlengthatleast(&state->tmpbasis, nec+nic, n+1, _state); rmatrixsetlengthatleast(&state->pdensebatch, nec+nic, n+1, _state); rmatrixsetlengthatleast(&state->idensebatch, nec+nic, n+1, _state); rmatrixsetlengthatleast(&state->sdensebatch, nec+nic, n+1, _state); ivectorsetlengthatleast(&state->sparsebatch, n, _state); state->sparsebatchsize = 0; state->densebatchsize = 0; state->basisage = 0; state->basisisready = ae_true; /* * Determine number of active boundary and non-boundary * constraints, move them to TmpBasis. Quick exit if no * non-boundary constraints were detected. 
*/ hasactivelin = ae_false; for(i=0; i<=nec+nic-1; i++) { if( state->cstatus.ptr.p_int[n+i]>0 ) { hasactivelin = ae_true; } } for(j=0; j<=n-1; j++) { if( state->cstatus.ptr.p_int[j]>0 ) { state->sparsebatch.ptr.p_int[state->sparsebatchsize] = j; state->sparsebatchsize = state->sparsebatchsize+1; } } if( !hasactivelin ) { return; } /* * Prepare precomputed values */ rvectorsetlengthatleast(&state->tmpreciph, n, _state); for(i=0; i<=n-1; i++) { state->tmpreciph.ptr.p_double[i] = 1/state->h.ptr.p_double[i]; } /* * Prepare initial candidate set: * * select active constraints * * normalize (inner product is given by preconditioner) * * orthogonalize with respect to active box constraints * * copy normalized/orthogonalized candidates to PBasis/SBasis/IBasis */ candidatescnt = 0; for(i=0; i<=nec+nic-1; i++) { if( state->cstatus.ptr.p_int[n+i]>0 ) { ae_v_move(&state->tmpbasis.ptr.pp_double[candidatescnt][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n)); inc(&candidatescnt, _state); } } for(i=0; i<=candidatescnt-1; i++) { v = 0.0; for(j=0; j<=n-1; j++) { v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j], _state)*state->tmpreciph.ptr.p_double[j]; } if( ae_fp_greater(v,(double)(0)) ) { v = 1/ae_sqrt(v, _state); for(j=0; j<=n; j++) { state->tmpbasis.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]*v; } } } for(j=0; j<=n-1; j++) { if( state->cstatus.ptr.p_int[j]>0 ) { for(i=0; i<=candidatescnt-1; i++) { state->tmpbasis.ptr.pp_double[i][n] = state->tmpbasis.ptr.pp_double[i][n]-state->tmpbasis.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; state->tmpbasis.ptr.pp_double[i][j] = 0.0; } } } for(i=0; i<=candidatescnt-1; i++) { for(j=0; j<=n; j++) { state->pdensebatch.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]; state->sdensebatch.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]; state->idensebatch.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]; } } /* * Perform orthogonalization of general linear constraints with respect * to each other (constraints in P/S/IBasis are already normalized w.r.t. * box constraints). During this process we select strictly active constraints * from the candidate set, and drop ones which were detected as redundant * during orthogonalization. * * Orthogonalization is performed with the help of Gram-Schmidt process. * Due to accumulation of round-off errors it is beneficial to perform * pivoting, i.e. to select candidate vector with largest norm at each * step. * * First (basic) version of the algorithm is: * 0. split all constraints into two sets: basis ones (initially empty) * and candidate ones (all constraints) * 1. fill PBasis with H-normalized candidate constraints, fill * corresponding entries of S/IBasis with corresponding * (non-normalized) constraints * 2. select row of PBasis with largest norm, move it (and its S/IBasis * counterparts) to the beginning of the candidate set, H-normalize * this row (rows of S/IBasis are normalized using corresponding norms). * Stop if largest row is nearly (or exactly) zero. * 3. orthogonalize remaining rows of P/S/IBasis with respect to * one chosen at step (2). It can be done efficiently using * combination of DGEMV/DGER BLAS calls. * 4. increase basis size by one, decrease candidate set size by one, * goto (2) * * However, naive implementation of the algorithm above spends significant * amount of time in step (2) - selection of row with largest H-norm. Step * (3) can be efficiently implemented with optimized BLAS, but we have no * optimized BLAS kernels for step(2). 
And because step (3) changes row norms, * step (2) has to be re-calculated every time, which is quite slow. * * We can save significant amount of calculations by noticing that: * * step (3) DECREASES row norms, but never increases them * * we can maintain upper bounds for row H-norms in a separate array, * use them for initial evaluation of best candidates, and update them * after we find some promising row (all bounds are invalidated after * step 3, but their old values still carry some information) * * it is beneficial to re-evaluate bounds only for rows which are * significantly (at least a few percent) larger than best one found so far * * because rows are initially normalized, initial values for upper bounds * can be set to 1.0 */ ae_assert(state->densebatchsize==0, "SAS: integrity check failed", _state); ae_assert(ae_fp_greater(sactivesets_minnormseparation,(double)(0)), "SAS: integrity check failed", _state); rvectorsetlengthatleast(&state->tmpnormestimates, candidatescnt, _state); for(i=0; i<=candidatescnt-1; i++) { state->tmpnormestimates.ptr.p_double[i] = 1.0; } while(state->sparsebatchsize+state->densebatchsize<n) { /* * Select candidate row with largest H-norm, to be added to the basis. */ vmax = 0.0; kmax = -1; for(i=state->densebatchsize; i<=state->densebatchsize+candidatescnt-1; i++) { /* * Use upper bound for row norm for initial evaluation. * Skip rows whose upper bound is less than best candidate * found so far. * * NOTE: in fact, we may skip rows whose upper bound is * marginally higher than that of best candidate. * No need to perform costly re-evaluation in order * to get just a few percent of improvement. */ if( ae_fp_less(state->tmpnormestimates.ptr.p_double[i],vmax*(1+sactivesets_minnormseparation)) ) { continue; } /* * OK, upper bound is large enough... let's perform full * re-evaluation and update of the estimate. */ v = 0.0; for(j=0; j<=n-1; j++) { vv = state->pdensebatch.ptr.pp_double[i][j]; v = v+vv*vv*state->tmpreciph.ptr.p_double[j]; } v = ae_sqrt(v, _state); state->tmpnormestimates.ptr.p_double[i] = v; /* * Now compare with best candidate so far */ if( ae_fp_greater(v,vmax) ) { vmax = v; kmax = i; } } if( ae_fp_less(vmax,1.0E4*ae_machineepsilon)||kmax<0 ) { /* * All candidates are either zero or too small (after orthogonalization) */ candidatescnt = 0; break; } /* * Candidate is selected for inclusion into basis set. * * Move candidate row to the beginning of candidate array (which is * right past the end of the approved basis). Normalize (for P-basis * we perform preconditioner-based normalization, for S-basis - scale * based, for I-basis - identity based).
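 * For reference, the three normalizations applied below use the following norms
 * (this just writes out what the code computes; H is the preconditioner, S the scale):
 *     ||v||_P = sqrt( SUM_j v[j]^2/H[j] )
 *     ||v||_S = sqrt( SUM_j (S[j]*v[j])^2 )
 *     ||v||_I = sqrt( SUM_j v[j]^2 )
 * The right part stored in column N of each row is rescaled together with the row.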
*/ swaprows(&state->pdensebatch, state->densebatchsize, kmax, n+1, _state); swaprows(&state->sdensebatch, state->densebatchsize, kmax, n+1, _state); swaprows(&state->idensebatch, state->densebatchsize, kmax, n+1, _state); swapelements(&state->tmpnormestimates, state->densebatchsize, kmax, _state); v = 1/vmax; ae_v_muld(&state->pdensebatch.ptr.pp_double[state->densebatchsize][0], 1, ae_v_len(0,n), v); v = (double)(0); for(j=0; j<=n-1; j++) { vv = state->sdensebatch.ptr.pp_double[state->densebatchsize][j]*state->s.ptr.p_double[j]; v = v+vv*vv; } ae_assert(ae_fp_greater(v,(double)(0)), "SActiveSet.RebuildBasis(): integrity check failed, SNorm=0", _state); v = 1/ae_sqrt(v, _state); ae_v_muld(&state->sdensebatch.ptr.pp_double[state->densebatchsize][0], 1, ae_v_len(0,n), v); v = (double)(0); for(j=0; j<=n-1; j++) { vv = state->idensebatch.ptr.pp_double[state->densebatchsize][j]; v = v+vv*vv; } ae_assert(ae_fp_greater(v,(double)(0)), "SActiveSet.RebuildBasis(): integrity check failed, INorm=0", _state); v = 1/ae_sqrt(v, _state); ae_v_muld(&state->idensebatch.ptr.pp_double[state->densebatchsize][0], 1, ae_v_len(0,n), v); /* * Reorthogonalize other candidates with respect to candidate #0: * * calculate projections en masse with GEMV() * * subtract projections with GER() */ rvectorsetlengthatleast(&state->tmp0, candidatescnt-1, _state); for(j=0; j<=n-1; j++) { state->tmpprodp.ptr.p_double[j] = state->pdensebatch.ptr.pp_double[state->densebatchsize][j]/state->h.ptr.p_double[j]; state->tmpprods.ptr.p_double[j] = state->sdensebatch.ptr.pp_double[state->densebatchsize][j]*ae_sqr(state->s.ptr.p_double[j], _state); } for(j=0; j<=n; j++) { state->tmpcp.ptr.p_double[j] = state->pdensebatch.ptr.pp_double[state->densebatchsize][j]; state->tmpcs.ptr.p_double[j] = state->sdensebatch.ptr.pp_double[state->densebatchsize][j]; state->tmpci.ptr.p_double[j] = state->idensebatch.ptr.pp_double[state->densebatchsize][j]; } rmatrixgemv(candidatescnt-1, n, 1.0, &state->pdensebatch, state->densebatchsize+1, 0, 0, &state->tmpprodp, 0, 0.0, &state->tmp0, 0, _state); rmatrixger(candidatescnt-1, n+1, &state->pdensebatch, state->densebatchsize+1, 0, -1.0, &state->tmp0, 0, &state->tmpcp, 0, _state); rmatrixgemv(candidatescnt-1, n, 1.0, &state->sdensebatch, state->densebatchsize+1, 0, 0, &state->tmpprods, 0, 0.0, &state->tmp0, 0, _state); rmatrixger(candidatescnt-1, n+1, &state->sdensebatch, state->densebatchsize+1, 0, -1.0, &state->tmp0, 0, &state->tmpcs, 0, _state); rmatrixgemv(candidatescnt-1, n, 1.0, &state->idensebatch, state->densebatchsize+1, 0, 0, &state->tmpci, 0, 0.0, &state->tmp0, 0, _state); rmatrixger(candidatescnt-1, n+1, &state->idensebatch, state->densebatchsize+1, 0, -1.0, &state->tmp0, 0, &state->tmpci, 0, _state); /* * Increase basis, decrease candidates count */ inc(&state->densebatchsize, _state); dec(&candidatescnt, _state); } } /************************************************************************* This function appends new constraints (if possible; sometimes it isn't!) to three orthonormal basises for current active set: * P-orthogonal one, which is orthogonalized with inner product (x,y) = x'*P*y, where P=inv(H) is current preconditioner * S-orthogonal one, which is orthogonalized with inner product (x,y) = x'*S'*S*y, where S is diagonal scaling matrix * I-orthogonal one, which is orthogonalized with standard dot product NOTE: all sets of orthogonal vectors are guaranteed to have same size. 
P-orthogonal basis is built first, I/S-orthogonal basises are forced to have same number of vectors as P-orthogonal one (padded by zero vectors if needed). NOTE: this function may fail to update basis without full recalculation; in such case it will set BasisIsReady to False and silently return; if it succeeds, it will increase BasisSize. INPUT PARAMETERS: State - active set object NewEntries - array[N+NEC+NIC], indexes of constraints being added are marked as True; it is the responsibility of the caller to specify only those constraints which were previously inactive; when some constraint is already in the active set, algorithm behavior is undefined. OUTPUT PARAMETERS: State - active set object with new basis -- ALGLIB -- Copyright 03.10.2017 by Bochkanov Sergey *************************************************************************/ void sasappendtobasis(sactiveset* state, /* Boolean */ ae_vector* newentries, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; ae_int_t j; ae_int_t t; ae_int_t nact; double v; double vp; double vs; double vi; double initnormp; double projnormp; double projnorms; double projnormi; if( !state->basisisready ) { return; } n = state->n; nec = state->nec; nic = state->nic; /* * Count number of constraints to activate; * perform integrity check. */ nact = 0; for(i=0; i<=n-1; i++) { if( newentries->ptr.p_bool[i] ) { nact = nact+1; } } for(i=n; i<=n+nec-1; i++) { ae_assert(!newentries->ptr.p_bool[i], "SAS: integrity check failed (appendtobasis.0)", _state); } for(i=n+nec; i<=n+nec+nic-1; i++) { if( newentries->ptr.p_bool[i] ) { nact = nact+1; } } if( nact+state->basisage>sactivesets_maxbasisage ) { state->basisisready = ae_false; return; } /* * Resize basis matrices if needed */ rmatrixgrowrowsto(&state->pdensebatch, state->densebatchsize+nact, n+1, _state); rmatrixgrowrowsto(&state->sdensebatch, state->densebatchsize+nact, n+1, _state); rmatrixgrowrowsto(&state->idensebatch, state->densebatchsize+nact, n+1, _state); /* * Try adding recommended entries to basis. * If reorthogonalization removes too much of candidate constraint, * we will invalidate basis and try to rebuild it from scratch. */ rvectorsetlengthatleast(&state->tmp0, n+1, _state); rvectorsetlengthatleast(&state->tmpcp, n+1, _state); rvectorsetlengthatleast(&state->tmpcs, n+1, _state); rvectorsetlengthatleast(&state->tmpci, n+1, _state); rvectorsetlengthatleast(&state->tmpprodp, n, _state); rvectorsetlengthatleast(&state->tmpprods, n, _state); for(t=0; t<=n+nec+nic-1; t++) { if( newentries->ptr.p_bool[t] ) { /* * Basis is full? Quick skip! */ if( state->sparsebatchsize+state->densebatchsize>=n ) { ae_assert(state->sparsebatchsize+state->densebatchsize==n, "SAS: integrity check failed (sasappendtobasis)", _state); break; } /* * Copy constraint to temporary storage. */ if( t<n ) { /* * Copy box constraint */ for(j=0; j<=n; j++) { state->tmp0.ptr.p_double[j] = (double)(0); } state->tmp0.ptr.p_double[t] = 1.0; state->tmp0.ptr.p_double[n] = state->xc.ptr.p_double[t]; } else { /* * Copy general linear constraint */ for(j=0; j<=n; j++) { state->tmp0.ptr.p_double[j] = state->cleic.ptr.pp_double[t-n][j]; } } /* * Calculate initial norm (preconditioner is used for norm calculation). */ initnormp = 0.0; for(j=0; j<=n-1; j++) { v = state->tmp0.ptr.p_double[j]; initnormp = initnormp+v*v/state->h.ptr.p_double[j]; } initnormp = ae_sqrt(initnormp, _state); if( ae_fp_eq(initnormp,(double)(0)) ) { /* * Well, it is not expected. Let's just rebuild basis * from scratch and forget about this strange situation...
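 * (For box constraints T<N this situation cannot actually occur: the copied row is the
 * unit vector e_T, so InitNormP=1/sqrt(H[T]) is strictly positive. A zero norm can only
 * mean that the general linear constraint being appended has an identically zero
 * linear part.)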
*/ state->basisisready = ae_false; return; } /* * Orthogonalize Tmp0 w.r.t. sparse batch (box constraints stored in sparse storage). * * Copy to TmpCP/TmpCS/TmpCI (P for preconditioner-based inner product * used for orthogonalization, S for scale-based orthogonalization, * I for "traditional" inner product used for Gram-Schmidt orthogonalization). */ for(i=0; i<=state->sparsebatchsize-1; i++) { j = state->sparsebatch.ptr.p_int[i]; state->tmp0.ptr.p_double[n] = state->tmp0.ptr.p_double[n]-state->tmp0.ptr.p_double[j]*state->xc.ptr.p_double[j]; state->tmp0.ptr.p_double[j] = 0.0; } for(j=0; j<=n; j++) { state->tmpcp.ptr.p_double[j] = state->tmp0.ptr.p_double[j]; state->tmpcs.ptr.p_double[j] = state->tmp0.ptr.p_double[j]; state->tmpci.ptr.p_double[j] = state->tmp0.ptr.p_double[j]; } /* * Orthogonalize TmpCP/S/I with respect to active linear constraints from dense batch. * Corresponding norm (preconditioner, scale, identity) is used in each case. */ for(j=0; j<=n-1; j++) { state->tmpprodp.ptr.p_double[j] = 1/state->h.ptr.p_double[j]; state->tmpprods.ptr.p_double[j] = ae_sqr(state->s.ptr.p_double[j], _state); } for(i=0; i<=state->densebatchsize-1; i++) { vp = (double)(0); vs = (double)(0); vi = (double)(0); for(j=0; j<=n-1; j++) { vp = vp+state->pdensebatch.ptr.pp_double[i][j]*state->tmpcp.ptr.p_double[j]*state->tmpprodp.ptr.p_double[j]; vs = vs+state->sdensebatch.ptr.pp_double[i][j]*state->tmpcs.ptr.p_double[j]*state->tmpprods.ptr.p_double[j]; vi = vi+state->idensebatch.ptr.pp_double[i][j]*state->tmpci.ptr.p_double[j]; } ae_v_subd(&state->tmpcp.ptr.p_double[0], 1, &state->pdensebatch.ptr.pp_double[i][0], 1, ae_v_len(0,n), vp); ae_v_subd(&state->tmpcs.ptr.p_double[0], 1, &state->sdensebatch.ptr.pp_double[i][0], 1, ae_v_len(0,n), vs); ae_v_subd(&state->tmpci.ptr.p_double[0], 1, &state->idensebatch.ptr.pp_double[i][0], 1, ae_v_len(0,n), vi); } projnormp = 0.0; projnorms = 0.0; projnormi = 0.0; for(j=0; j<=n-1; j++) { projnormp = projnormp+ae_sqr(state->tmpcp.ptr.p_double[j], _state)/state->h.ptr.p_double[j]; projnorms = projnorms+ae_sqr(state->tmpcs.ptr.p_double[j], _state)*ae_sqr(state->s.ptr.p_double[j], _state); projnormi = projnormi+ae_sqr(state->tmpci.ptr.p_double[j], _state); } projnormp = ae_sqrt(projnormp, _state); projnorms = ae_sqrt(projnorms, _state); projnormi = ae_sqrt(projnormi, _state); if( ae_fp_less_eq(projnormp,sactivesets_maxbasisdecay*initnormp) ) { state->basisisready = ae_false; return; /* * Nearly zero row, skip */ } ae_assert(ae_fp_greater(projnormp,(double)(0)), "SAS: integrity check failed, ProjNormP=0", _state); ae_assert(ae_fp_greater(projnorms,(double)(0)), "SAS: integrity check failed, ProjNormS=0", _state); ae_assert(ae_fp_greater(projnormi,(double)(0)), "SAS: integrity check failed, ProjNormI=0", _state); v = 1/projnormp; ae_v_moved(&state->pdensebatch.ptr.pp_double[state->densebatchsize][0], 1, &state->tmpcp.ptr.p_double[0], 1, ae_v_len(0,n), v); v = 1/projnorms; ae_v_moved(&state->sdensebatch.ptr.pp_double[state->densebatchsize][0], 1, &state->tmpcs.ptr.p_double[0], 1, ae_v_len(0,n), v); v = 1/projnormi; ae_v_moved(&state->idensebatch.ptr.pp_double[state->densebatchsize][0], 1, &state->tmpci.ptr.p_double[0], 1, ae_v_len(0,n), v); /* * Increase set size */ inc(&state->densebatchsize, _state); inc(&state->basisage, _state); } } } /************************************************************************* This subroutine calculates preconditioned descent direction subject to current active set. 
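As a reference derivation (it spells out the check mentioned in the body of this function): since the rows of HA satisfy HA*inv(H)*HA'=I, the direction d = -inv(H)*( G - HA'*(HA*inv(H)*G) ) computed below satisfies HA*d = -HA*inv(H)*G + (HA*inv(H)*HA')*(HA*inv(H)*G) = 0, i.e. it is orthogonal to every active constraint row.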
INPUT PARAMETERS: State - active set object G - array[N], gradient H - array[N], Hessian matrix HA - active constraints orthogonalized in such way that HA*inv(H)*HA'= I. Normalize- whether we need normalized descent or not D - possibly preallocated buffer; automatically resized. OUTPUT PARAMETERS: D - descent direction projected onto current active set. Components of D which correspond to active boundary constraints are forced to be exactly zero. In case D is non-zero and Normalize is True, it is normalized to have unit norm. NOTE: if we have N active constraints, D is explicitly set to zero. -- ALGLIB -- Copyright 21.12.2012 by Bochkanov Sergey *************************************************************************/ static void sactivesets_constraineddescent(sactiveset* state, /* Real */ ae_vector* g, /* Real */ ae_vector* h, /* Real */ ae_matrix* ha, ae_bool normalize, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t n; double v; ae_assert(state->algostate==1, "SAS: internal error in ConstrainedDescent() - not in optimization mode", _state); ae_assert(state->basisisready, "SAS: internal error in ConstrainedDescent() - no basis", _state); n = state->n; rvectorsetlengthatleast(d, n, _state); /* * Calculate preconditioned constrained descent direction: * * d := -inv(H)*( g - HA'*(HA*inv(H)*g) ) * * Formula above always gives direction which is orthogonal to rows of HA. * You can verify it by multiplication of both sides by HA[i] (I-th row), * taking into account that HA*inv(H)*HA'= I (by definition of HA - it is * orthogonal basis with inner product given by inv(H)). */ for(i=0; i<=n-1; i++) { d->ptr.p_double[i] = g->ptr.p_double[i]; } for(i=0; i<=state->densebatchsize-1; i++) { v = 0.0; for(j=0; j<=n-1; j++) { v = v+ha->ptr.pp_double[i][j]*d->ptr.p_double[j]/h->ptr.p_double[j]; } ae_v_subd(&d->ptr.p_double[0], 1, &ha->ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]>0 ) { d->ptr.p_double[i] = (double)(0); } } v = 0.0; for(i=0; i<=n-1; i++) { d->ptr.p_double[i] = -d->ptr.p_double[i]/h->ptr.p_double[i]; v = v+ae_sqr(d->ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( state->sparsebatchsize+state->densebatchsize>=n ) { v = (double)(0); for(i=0; i<=n-1; i++) { d->ptr.p_double[i] = (double)(0); } } if( normalize&&ae_fp_greater(v,(double)(0)) ) { for(i=0; i<=n-1; i++) { d->ptr.p_double[i] = d->ptr.p_double[i]/v; } } } /************************************************************************* This function recalculates constraints - activates and deactivates them according to gradient value at current point. Algorithm assumes that we want to make Quasi-Newton step from current point with diagonal Quasi-Newton matrix H. Constraints are activated and deactivated in such way that we won't violate any constraint by step. Only already "active" and "candidate" elements of ActiveSet are examined; constraints which are not active are not examined. 
INPUT PARAMETERS: State - active set object GC - array[N], gradient at XC H - array[N], Hessian matrix OUTPUT PARAMETERS: State - active set object, with new set of constraint -- ALGLIB -- Copyright 26.09.2012 by Bochkanov Sergey *************************************************************************/ static void sactivesets_reactivateconstraints(sactiveset* state, /* Real */ ae_vector* gc, /* Real */ ae_vector* h, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t i; ae_int_t j; ae_int_t idx0; ae_int_t idx1; double v; ae_int_t nactivebnd; ae_int_t nactivelin; ae_int_t nactiveconstraints; double rowscale; ae_assert(state->algostate==1, "SASReactivateConstraintsPrec: must be in optimization mode", _state); /* * Prepare */ n = state->n; nec = state->nec; nic = state->nic; state->basisisready = ae_false; /* * Handle important special case - no linear constraints, * only boundary constraints are present */ if( nec+nic==0 ) { for(i=0; i<=n-1; i++) { if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 1; continue; } if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_greater_eq(gc->ptr.p_double[i],(double)(0)) ) { state->cstatus.ptr.p_int[i] = 1; continue; } if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_less_eq(gc->ptr.p_double[i],(double)(0)) ) { state->cstatus.ptr.p_int[i] = 1; continue; } state->cstatus.ptr.p_int[i] = -1; } return; } /* * General case. * Allocate temporaries. */ rvectorsetlengthatleast(&state->rctmpg, n, _state); rvectorsetlengthatleast(&state->rctmprightpart, n, _state); rvectorsetlengthatleast(&state->rctmps, n, _state); rmatrixsetlengthatleast(&state->rctmpdense0, n, nec+nic, _state); rmatrixsetlengthatleast(&state->rctmpdense1, n, nec+nic, _state); bvectorsetlengthatleast(&state->rctmpisequality, n+nec+nic, _state); ivectorsetlengthatleast(&state->rctmpconstraintidx, n+nec+nic, _state); /* * Calculate descent direction */ ae_v_moveneg(&state->rctmpg.ptr.p_double[0], 1, &gc->ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Determine candidates to the active set. * * After this block constraints become either "inactive" (CStatus[i]<0) * or "candidates" (CStatus[i]=0). Previously active constraints always * become "candidates". 
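 * (Making previously active constraints mere candidates allows the NNLS step below
 * not only to activate new constraints, but also to drop previously active inequality
 * constraints whose Lagrange multiplier turns out to be zero.)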
*/ for(i=0; i<=n-1; i++) { state->cstatus.ptr.p_int[i] = -1; } for(i=n; i<=n+nec+nic-1; i++) { if( state->cstatus.ptr.p_int[i]>0 ) { state->cstatus.ptr.p_int[i] = 0; } else { state->cstatus.ptr.p_int[i] = -1; } } nactiveconstraints = 0; nactivebnd = 0; nactivelin = 0; for(i=0; i<=n-1; i++) { /* * Activate boundary constraints: * * copy constraint index to RCTmpConstraintIdx * * set corresponding element of CStatus[] to "candidate" * * fill RCTmpS by either -1 (lower bound) or +1 (upper bound) * * set RCTmpIsEquality to False (BndL<BndU) or True (BndL=BndU) */ if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { /* * Equality constraint is activated */ state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; state->cstatus.ptr.p_int[i] = 0; state->rctmps.ptr.p_double[i] = 1.0; state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_true; nactiveconstraints = nactiveconstraints+1; nactivebnd = nactivebnd+1; continue; } if( state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { /* * Lower bound is activated */ state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; state->cstatus.ptr.p_int[i] = 0; state->rctmps.ptr.p_double[i] = -1.0; state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_false; nactiveconstraints = nactiveconstraints+1; nactivebnd = nactivebnd+1; continue; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { /* * Upper bound is activated */ state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; state->cstatus.ptr.p_int[i] = 0; state->rctmps.ptr.p_double[i] = 1.0; state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_false; nactiveconstraints = nactiveconstraints+1; nactivebnd = nactivebnd+1; continue; } } for(i=0; i<=nec+nic-1; i++) { if( i>=nec&&state->cstatus.ptr.p_int[n+i]<0 ) { /* * Inequality constraints are skipped if both (a) constraint was * not active, and (b) we are too far away from the boundary. */ rowscale = 0.0; v = -state->cleic.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = v+state->cleic.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; rowscale = ae_maxreal(rowscale, ae_fabs(state->cleic.ptr.pp_double[i][j]*state->s.ptr.p_double[j], _state), _state); } if( ae_fp_less_eq(v,-1.0E5*ae_machineepsilon*rowscale) ) { /* * NOTE: it is important to check for non-strict inequality * because we have to correctly handle zero constraint * 0*x<=0 */ continue; } } ae_v_move(&state->rctmpdense0.ptr.pp_double[0][nactivelin], state->rctmpdense0.stride, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = n+i; state->cstatus.ptr.p_int[n+i] = 0; state->rctmpisequality.ptr.p_bool[nactiveconstraints] = i<nec; nactiveconstraints = nactiveconstraints+1; nactivelin = nactivelin+1; } /* * Skip the NNLS phase if no candidate constraints were found; in this case we only * have to set statuses of the box constraints (same rules as in the boundary-only * special case above). */ if( nactiveconstraints==0 ) { for(i=0; i<=n-1; i++) { if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cstatus.ptr.p_int[i] = 1; continue; } if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_greater_eq(gc->ptr.p_double[i],(double)(0)) ) { state->cstatus.ptr.p_int[i] = 1; continue; } if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_less_eq(gc->ptr.p_double[i],(double)(0)) ) { state->cstatus.ptr.p_int[i] = 1; continue; } } return; } /* * General case. * * APPROACH TO CONSTRAINTS ACTIVATION/DEACTIVATION * * We have NActiveConstraints "candidates": NActiveBnd boundary candidates, * NActiveLin linear candidates.
Indexes of boundary constraints are stored * in RCTmpConstraintIdx[0:NActiveBnd-1], indexes of linear ones are stored * in RCTmpConstraintIdx[NActiveBnd:NActiveBnd+NActiveLin-1]. Some of the * constraints are equality ones, some are inequality - as specified by * RCTmpIsEquality[i]. * * Now we have to determine active subset of "candidates" set. In order to * do so we solve following constrained minimization problem: * ( )^2 * min ( SUM(lambda[i]*A[i]) + G ) * ( ) * Here: * * G is a gradient (column vector) * * A[i] is a column vector, linear (left) part of I-th constraint. * I=0..NActiveConstraints-1, first NActiveBnd elements of A are just * subset of identity matrix (boundary constraints), next NActiveLin * elements are subset of rows of the matrix of general linear constraints. * * lambda[i] is a Lagrange multiplier corresponding to I-th constraint * * NOTE: for preconditioned setting A is replaced by A*H^(-0.5), G is * replaced by G*H^(-0.5). We apply this scaling at the last stage, * before passing data to NNLS solver. * * Minimization is performed subject to non-negativity constraints on * lambda[i] corresponding to inequality constraints. Inequality constraints * which correspond to non-zero lambda are activated, equality constraints * are always considered active. * * Informally speaking, we "decompose" descent direction -G and represent * it as sum of constraint vectors and "residual" part (which is equal to * the actual descent direction subject to constraints). * * SOLUTION OF THE NNLS PROBLEM * * We solve this optimization problem with Non-Negative Least Squares solver, * which can efficiently solve least squares problems of the form * * ( [ I | AU ] )^2 * min ( [ | ]*x-b ) s.t. non-negativity constraints on some x[i] * ( [ 0 | AL ] ) * * In order to use this solver we have to rearrange rows of A[] and G in * such way that first NActiveBnd columns of A store identity matrix (before * sorting non-zero elements are randomly distributed in the first NActiveBnd * columns of A, during sorting we move them to first NActiveBnd rows). * * Then we create instance of NNLS solver (we reuse instance left from the * previous run of the optimization problem) and solve NNLS problem. */ idx0 = 0; idx1 = nactivebnd; for(i=0; i<=n-1; i++) { if( state->cstatus.ptr.p_int[i]>=0 ) { v = 1/ae_sqrt(h->ptr.p_double[i], _state); for(j=0; j<=nactivelin-1; j++) { state->rctmpdense1.ptr.pp_double[idx0][j] = state->rctmpdense0.ptr.pp_double[i][j]/state->rctmps.ptr.p_double[i]*v; } state->rctmprightpart.ptr.p_double[idx0] = state->rctmpg.ptr.p_double[i]/state->rctmps.ptr.p_double[i]*v; idx0 = idx0+1; } else { v = 1/ae_sqrt(h->ptr.p_double[i], _state); for(j=0; j<=nactivelin-1; j++) { state->rctmpdense1.ptr.pp_double[idx1][j] = state->rctmpdense0.ptr.pp_double[i][j]*v; } state->rctmprightpart.ptr.p_double[idx1] = state->rctmpg.ptr.p_double[i]*v; idx1 = idx1+1; } } snnlsinit(n, ae_minint(nec+nic, n, _state), n, &state->solver, _state); snnlssetproblem(&state->solver, &state->rctmpdense1, &state->rctmprightpart, nactivebnd, nactiveconstraints-nactivebnd, n, _state); for(i=0; i<=nactiveconstraints-1; i++) { if( state->rctmpisequality.ptr.p_bool[i] ) { snnlsdropnnc(&state->solver, i, _state); } } snnlssolve(&state->solver, &state->rctmplambdas, _state); /* * After solution of the problem we activate equality constraints (always active) * and inequality constraints with non-zero Lagrange multipliers. Then we reorthogonalize * active constraints. 
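 * Illustrative special case (unit preconditioner, no linear constraints; not a separate
 * code path): a candidate upper bound X[i]=BndU[i] contributes the single term
 * (lambda+G[i])^2 to the NNLS objective, so lambda=max(0,-G[i]) and the bound is
 * activated exactly when the gradient component is negative, i.e. when an
 * unconstrained step would push X[i] past the bound - in agreement with the
 * boundary-only special case handled at the beginning of this function.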
*/ for(i=0; i<=n+nec+nic-1; i++) { state->cstatus.ptr.p_int[i] = -1; } for(i=0; i<=nactiveconstraints-1; i++) { if( state->rctmpisequality.ptr.p_bool[i]||ae_fp_greater(state->rctmplambdas.ptr.p_double[i],(double)(0)) ) { state->cstatus.ptr.p_int[state->rctmpconstraintidx.ptr.p_int[i]] = 1; } else { state->cstatus.ptr.p_int[state->rctmpconstraintidx.ptr.p_int[i]] = 0; } } sasrebuildbasis(state, _state); } void _sactiveset_init(void* _p, ae_state *_state, ae_bool make_automatic) { sactiveset *p = (sactiveset*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->h, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cstatus, 0, DT_INT, _state, make_automatic); ae_matrix_init(&p->sdensebatch, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->pdensebatch, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->idensebatch, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sparsebatch, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->mtnew, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->mtx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->mtas, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->cdtmp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->corrtmp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->unitdiagonal, 0, DT_REAL, _state, make_automatic); _snnlssolver_init(&p->solver, _state, make_automatic); ae_vector_init(&p->scntmp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpfeas, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmpm0, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rctmps, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rctmpg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rctmprightpart, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->rctmpdense0, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->rctmpdense1, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rctmpisequality, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->rctmpconstraintidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->rctmplambdas, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmpbasis, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpnormestimates, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpreciph, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpprodp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpprods, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpcp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpcs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpci, 0, DT_REAL, _state, make_automatic); } void _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { sactiveset *dst = (sactiveset*)_dst; sactiveset *src = (sactiveset*)_src; dst->n = src->n; dst->algostate = src->algostate; ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); dst->hasxc = src->hasxc; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); 
ae_vector_init_copy(&dst->h, &src->h, _state, make_automatic); ae_vector_init_copy(&dst->cstatus, &src->cstatus, _state, make_automatic); dst->basisisready = src->basisisready; ae_matrix_init_copy(&dst->sdensebatch, &src->sdensebatch, _state, make_automatic); ae_matrix_init_copy(&dst->pdensebatch, &src->pdensebatch, _state, make_automatic); ae_matrix_init_copy(&dst->idensebatch, &src->idensebatch, _state, make_automatic); dst->densebatchsize = src->densebatchsize; ae_vector_init_copy(&dst->sparsebatch, &src->sparsebatch, _state, make_automatic); dst->sparsebatchsize = src->sparsebatchsize; dst->basisage = src->basisage; dst->feasinitpt = src->feasinitpt; dst->constraintschanged = src->constraintschanged; ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic); dst->nec = src->nec; dst->nic = src->nic; ae_vector_init_copy(&dst->mtnew, &src->mtnew, _state, make_automatic); ae_vector_init_copy(&dst->mtx, &src->mtx, _state, make_automatic); ae_vector_init_copy(&dst->mtas, &src->mtas, _state, make_automatic); ae_vector_init_copy(&dst->cdtmp, &src->cdtmp, _state, make_automatic); ae_vector_init_copy(&dst->corrtmp, &src->corrtmp, _state, make_automatic); ae_vector_init_copy(&dst->unitdiagonal, &src->unitdiagonal, _state, make_automatic); _snnlssolver_init_copy(&dst->solver, &src->solver, _state, make_automatic); ae_vector_init_copy(&dst->scntmp, &src->scntmp, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmpfeas, &src->tmpfeas, _state, make_automatic); ae_matrix_init_copy(&dst->tmpm0, &src->tmpm0, _state, make_automatic); ae_vector_init_copy(&dst->rctmps, &src->rctmps, _state, make_automatic); ae_vector_init_copy(&dst->rctmpg, &src->rctmpg, _state, make_automatic); ae_vector_init_copy(&dst->rctmprightpart, &src->rctmprightpart, _state, make_automatic); ae_matrix_init_copy(&dst->rctmpdense0, &src->rctmpdense0, _state, make_automatic); ae_matrix_init_copy(&dst->rctmpdense1, &src->rctmpdense1, _state, make_automatic); ae_vector_init_copy(&dst->rctmpisequality, &src->rctmpisequality, _state, make_automatic); ae_vector_init_copy(&dst->rctmpconstraintidx, &src->rctmpconstraintidx, _state, make_automatic); ae_vector_init_copy(&dst->rctmplambdas, &src->rctmplambdas, _state, make_automatic); ae_matrix_init_copy(&dst->tmpbasis, &src->tmpbasis, _state, make_automatic); ae_vector_init_copy(&dst->tmpnormestimates, &src->tmpnormestimates, _state, make_automatic); ae_vector_init_copy(&dst->tmpreciph, &src->tmpreciph, _state, make_automatic); ae_vector_init_copy(&dst->tmpprodp, &src->tmpprodp, _state, make_automatic); ae_vector_init_copy(&dst->tmpprods, &src->tmpprods, _state, make_automatic); ae_vector_init_copy(&dst->tmpcp, &src->tmpcp, _state, make_automatic); ae_vector_init_copy(&dst->tmpcs, &src->tmpcs, _state, make_automatic); ae_vector_init_copy(&dst->tmpci, &src->tmpci, _state, make_automatic); } void _sactiveset_clear(void* _p) { sactiveset *p = (sactiveset*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->xc); ae_vector_clear(&p->s); ae_vector_clear(&p->h); ae_vector_clear(&p->cstatus); ae_matrix_clear(&p->sdensebatch); ae_matrix_clear(&p->pdensebatch); ae_matrix_clear(&p->idensebatch); ae_vector_clear(&p->sparsebatch); 
ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_matrix_clear(&p->cleic); ae_vector_clear(&p->mtnew); ae_vector_clear(&p->mtx); ae_vector_clear(&p->mtas); ae_vector_clear(&p->cdtmp); ae_vector_clear(&p->corrtmp); ae_vector_clear(&p->unitdiagonal); _snnlssolver_clear(&p->solver); ae_vector_clear(&p->scntmp); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmpfeas); ae_matrix_clear(&p->tmpm0); ae_vector_clear(&p->rctmps); ae_vector_clear(&p->rctmpg); ae_vector_clear(&p->rctmprightpart); ae_matrix_clear(&p->rctmpdense0); ae_matrix_clear(&p->rctmpdense1); ae_vector_clear(&p->rctmpisequality); ae_vector_clear(&p->rctmpconstraintidx); ae_vector_clear(&p->rctmplambdas); ae_matrix_clear(&p->tmpbasis); ae_vector_clear(&p->tmpnormestimates); ae_vector_clear(&p->tmpreciph); ae_vector_clear(&p->tmpprodp); ae_vector_clear(&p->tmpprods); ae_vector_clear(&p->tmpcp); ae_vector_clear(&p->tmpcs); ae_vector_clear(&p->tmpci); } void _sactiveset_destroy(void* _p) { sactiveset *p = (sactiveset*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->s); ae_vector_destroy(&p->h); ae_vector_destroy(&p->cstatus); ae_matrix_destroy(&p->sdensebatch); ae_matrix_destroy(&p->pdensebatch); ae_matrix_destroy(&p->idensebatch); ae_vector_destroy(&p->sparsebatch); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_matrix_destroy(&p->cleic); ae_vector_destroy(&p->mtnew); ae_vector_destroy(&p->mtx); ae_vector_destroy(&p->mtas); ae_vector_destroy(&p->cdtmp); ae_vector_destroy(&p->corrtmp); ae_vector_destroy(&p->unitdiagonal); _snnlssolver_destroy(&p->solver); ae_vector_destroy(&p->scntmp); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmpfeas); ae_matrix_destroy(&p->tmpm0); ae_vector_destroy(&p->rctmps); ae_vector_destroy(&p->rctmpg); ae_vector_destroy(&p->rctmprightpart); ae_matrix_destroy(&p->rctmpdense0); ae_matrix_destroy(&p->rctmpdense1); ae_vector_destroy(&p->rctmpisequality); ae_vector_destroy(&p->rctmpconstraintidx); ae_vector_destroy(&p->rctmplambdas); ae_matrix_destroy(&p->tmpbasis); ae_vector_destroy(&p->tmpnormestimates); ae_vector_destroy(&p->tmpreciph); ae_vector_destroy(&p->tmpprodp); ae_vector_destroy(&p->tmpprods); ae_vector_destroy(&p->tmpcp); ae_vector_destroy(&p->tmpcs); ae_vector_destroy(&p->tmpci); } #endif #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This function initializes QQPSettings structure with default settings. Newly created structure MUST be initialized by default settings - or by copy of the already initialized structure. -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qqploaddefaults(ae_int_t n, qqpsettings* s, ae_state *_state) { s->epsg = 0.0; s->epsf = 0.0; s->epsx = 1.0E-6; s->maxouterits = 0; s->cgphase = ae_true; s->cnphase = ae_true; s->cgminits = 5; s->cgmaxits = ae_maxint(s->cgminits, ae_round(1+0.33*n, _state), _state); s->sparsesolver = 0; s->cnmaxupdates = ae_round(1+0.1*n, _state); } /************************************************************************* This function initializes QQPSettings structure with copy of another, already initialized structure. 
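A minimal usage sketch (illustrative only, assuming the structure was created with the standard _qqpsettings_init() constructor generated for ALGLIB structures in this unit):

    qqpsettings tmp;
    _qqpsettings_init(&tmp, _state, ae_false);
    qqploaddefaults(n, &tmp, _state);
    tmp.epsx = 1.0E-9;
    qqpcopysettings(&tmp, &settings, _state);

where "settings" stands for the destination structure and EpsX is tightened relative to the 1.0E-6 default set by qqploaddefaults().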
-- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qqpcopysettings(qqpsettings* src, qqpsettings* dst, ae_state *_state) { dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxouterits = src->maxouterits; dst->cgphase = src->cgphase; dst->cnphase = src->cnphase; dst->cgminits = src->cgminits; dst->cgmaxits = src->cgmaxits; dst->sparsesolver = src->sparsesolver; dst->cnmaxupdates = src->cnmaxupdates; } /************************************************************************* This function performs preallocation of internal 2D matrices. If matrix size is less than expected, we grow to some larger value (specified by user). It can be useful in cases when we solve many subsequent QP problems with increasing sizes - helps to avoid multiple allocations. INPUT PARAMETERS: SState - object which stores temporaries: * uninitialized object is automatically initialized * previously allocated memory is reused as much as possible NExpected - if internal buffers have size enough for NExpected, no preallocation happens. If size is less than NExpected, buffers are preallocated up to NGrowTo*NGrowTo NGrowTo - new size OUTPUT PARAMETERS: SState - temporary buffers, some of them are preallocated -- ALGLIB -- Copyright 09.10.2017 by Bochkanov Sergey *************************************************************************/ void qqppreallocategrowdense(qqpbuffers* sstate, ae_int_t nexpected, ae_int_t ngrowto, ae_state *_state) { if( sstate->densea.rows<nexpected||sstate->densea.cols<nexpected ) { rmatrixsetlengthatleast(&sstate->densea, ngrowto, ngrowto, _state); } if( sstate->densez.rows<nexpected||sstate->densez.cols<nexpected ) { rmatrixsetlengthatleast(&sstate->densez, ngrowto, ngrowto, _state); } } /************************************************************************* This function runs QQP solver; it returns after optimization process was completed. Following QP problem is solved: min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) subject to boundary constraints. IMPORTANT: UNLIKE MANY OTHER SOLVERS, THIS FUNCTION DOES NOT REQUIRE YOU TO INITIALIZE STATE OBJECT. IT CAN BE AUTOMATICALLY INITIALIZED DURING SOLUTION PROCESS. INPUT PARAMETERS: AC - for dense problems given by CQM model (AKind=0) A-term of CQM object contains system matrix. Other terms are unspecified and should not be referenced. SparseAC - for sparse problems (AKind=1) DenseAC - for traditional dense matrices (AKind=2) AKind - matrix term to use: * 0 for dense CQM (CQMAC) * 1 for sparse matrix (SparseAC) * 2 for dense matrix (DenseAC) IsUpper - which triangle of SparseAC/DenseAC stores matrix - upper or lower one (for dense matrices this parameter is not actual). BC - linear term, array[NC] BndLC - lower bound, array[NC] BndUC - upper bound, array[NC] SC - scale vector, array[NC]: * I-th element contains scale of I-th variable, * SC[I]>0 XOriginC - origin term, array[NC]. Can be zero. NC - number of variables in the original formulation (no slack variables). CLEICC - linear equality/inequality constraints. Present version of this function does NOT provide publicly available support for linear constraints. This feature will be introduced in the future versions of the function. NEC, NIC - number of equality/inequality constraints. MUST BE ZERO IN THE CURRENT VERSION!!! Settings - QQPSettings object initialized by one of the initialization functions.
SState - object which stores temporaries: * uninitialized object is automatically initialized * previously allocated memory is reused as much as possible XS - initial point, array[NC] OUTPUT PARAMETERS: XS - last point TerminationType-termination type: * * * -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qqpoptimize(convexquadraticmodel* cqmac, sparsematrix* sparseac, /* Real */ ae_matrix* denseac, ae_int_t akind, ae_bool isupper, /* Real */ ae_vector* bc, /* Real */ ae_vector* bndlc, /* Real */ ae_vector* bnduc, /* Real */ ae_vector* sc, /* Real */ ae_vector* xoriginc, ae_int_t nc, qqpsettings* settings, qqpbuffers* sstate, /* Real */ ae_vector* xs, ae_int_t* terminationtype, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; ae_int_t k; double v; double vv; double d2; double d1; ae_int_t d1est; ae_int_t d2est; ae_bool needact; double reststp; double fullstp; double stpmax; double stp; ae_int_t stpcnt; ae_int_t cidx; double cval; ae_int_t cgcnt; ae_int_t cgmax; ae_int_t newtcnt; ae_int_t sparsesolver; double beta; ae_bool b; double fprev; double fcur; ae_bool problemsolved; ae_bool isconstrained; double f0; double f1; *terminationtype = 0; /* * Primary checks */ ae_assert((akind==0||akind==1)||akind==2, "QQPOptimize: incorrect AKind", _state); sstate->n = nc; n = sstate->n; *terminationtype = 0; sstate->repinneriterationscount = 0; sstate->repouteriterationscount = 0; sstate->repncholesky = 0; sstate->repncupdates = 0; /* * Several checks * * matrix size * * scale vector * * consistency of bound constraints * * consistency of settings */ if( akind==1 ) { ae_assert(sparsegetnrows(sparseac, _state)==n, "QQPOptimize: rows(SparseAC)<>N", _state); ae_assert(sparsegetncols(sparseac, _state)==n, "QQPOptimize: cols(SparseAC)<>N", _state); } for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(sc->ptr.p_double[i], _state)&&ae_fp_greater(sc->ptr.p_double[i],(double)(0)), "QQPOptimize: incorrect scale", _state); } for(i=0; i<=n-1; i++) { if( ae_isfinite(bndlc->ptr.p_double[i], _state)&&ae_isfinite(bnduc->ptr.p_double[i], _state) ) { if( ae_fp_greater(bndlc->ptr.p_double[i],bnduc->ptr.p_double[i]) ) { *terminationtype = -3; return; } } } ae_assert(settings->cgphase||settings->cnphase, "QQPOptimize: both phases (CG and Newton) are inactive", _state); /* * Allocate data structures */ rvectorsetlengthatleast(&sstate->bndl, n, _state); rvectorsetlengthatleast(&sstate->bndu, n, _state); bvectorsetlengthatleast(&sstate->havebndl, n, _state); bvectorsetlengthatleast(&sstate->havebndu, n, _state); rvectorsetlengthatleast(&sstate->xs, n, _state); rvectorsetlengthatleast(&sstate->xf, n, _state); rvectorsetlengthatleast(&sstate->xp, n, _state); rvectorsetlengthatleast(&sstate->gc, n, _state); rvectorsetlengthatleast(&sstate->cgc, n, _state); rvectorsetlengthatleast(&sstate->cgp, n, _state); rvectorsetlengthatleast(&sstate->dc, n, _state); rvectorsetlengthatleast(&sstate->dp, n, _state); rvectorsetlengthatleast(&sstate->tmp0, n, _state); rvectorsetlengthatleast(&sstate->tmp1, n, _state); rvectorsetlengthatleast(&sstate->stpbuf, 15, _state); sasinit(n, &sstate->sas, _state); /* * Scale/shift problem coefficients: * * min { 0.5*(x-x0)'*A*(x-x0) + b'*(x-x0) } * * becomes (after transformation "x = S*y+x0") * * min { 0.5*y'*(S*A*S)*y + (S*b)'*y * * Modified A_mod=S*A*S and b_mod=S*(b+A*x0) are * stored into SState.DenseA and SState.B. 
* */ rvectorsetlengthatleast(&sstate->b, n, _state); for(i=0; i<=n-1; i++) { sstate->b.ptr.p_double[i] = sc->ptr.p_double[i]*bc->ptr.p_double[i]; } sstate->akind = -99; if( akind==0 ) { /* * Dense QP problem - just copy and scale. */ rmatrixsetlengthatleast(&sstate->densea, n, n, _state); cqmgeta(cqmac, &sstate->densea, _state); sstate->akind = 0; sstate->absamax = (double)(0); sstate->absasum = (double)(0); sstate->absasum2 = (double)(0); for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { v = sc->ptr.p_double[i]*sstate->densea.ptr.pp_double[i][j]*sc->ptr.p_double[j]; vv = ae_fabs(v, _state); sstate->densea.ptr.pp_double[i][j] = v; sstate->absamax = ae_maxreal(sstate->absamax, vv, _state); sstate->absasum = sstate->absasum+vv; sstate->absasum2 = sstate->absasum2+vv*vv; } } } if( akind==1 ) { /* * Sparse QP problem - a bit tricky. Depending on format of the * input we use different strategies for copying matrix: * * SKS matrices are copied to SKS format * * anything else is copied to CRS format */ sparsecopytosksbuf(sparseac, &sstate->sparsea, _state); if( isupper ) { sparsetransposesks(&sstate->sparsea, _state); } sstate->akind = 1; sstate->sparseupper = ae_false; sstate->absamax = (double)(0); sstate->absasum = (double)(0); sstate->absasum2 = (double)(0); for(i=0; i<=n-1; i++) { k = sstate->sparsea.ridx.ptr.p_int[i]; for(j=i-sstate->sparsea.didx.ptr.p_int[i]; j<=i; j++) { v = sc->ptr.p_double[i]*sstate->sparsea.vals.ptr.p_double[k]*sc->ptr.p_double[j]; vv = ae_fabs(v, _state); sstate->sparsea.vals.ptr.p_double[k] = v; if( i==j ) { /* * Diagonal terms are counted only once */ sstate->absamax = ae_maxreal(sstate->absamax, vv, _state); sstate->absasum = sstate->absasum+vv; sstate->absasum2 = sstate->absasum2+vv*vv; } else { /* * Offdiagonal terms are counted twice */ sstate->absamax = ae_maxreal(sstate->absamax, vv, _state); sstate->absasum = sstate->absasum+2*vv; sstate->absasum2 = sstate->absasum2+2*vv*vv; } k = k+1; } } } if( akind==2 ) { /* * Dense QP problem - just copy and scale. */ rmatrixsetlengthatleast(&sstate->densea, n, n, _state); sstate->akind = 0; sstate->absamax = (double)(0); sstate->absasum = (double)(0); sstate->absasum2 = (double)(0); if( isupper ) { for(i=0; i<=n-1; i++) { for(j=i; j<=n-1; j++) { v = sc->ptr.p_double[i]*denseac->ptr.pp_double[i][j]*sc->ptr.p_double[j]; vv = ae_fabs(v, _state); sstate->densea.ptr.pp_double[i][j] = v; sstate->densea.ptr.pp_double[j][i] = v; if( ae_fp_eq((double)(i),v) ) { k = 1; } else { k = 2; } sstate->absamax = ae_maxreal(sstate->absamax, vv, _state); sstate->absasum = sstate->absasum+vv*k; sstate->absasum2 = sstate->absasum2+vv*vv*k; } } } else { for(i=0; i<=n-1; i++) { for(j=0; j<=i; j++) { v = sc->ptr.p_double[i]*denseac->ptr.pp_double[i][j]*sc->ptr.p_double[j]; vv = ae_fabs(v, _state); sstate->densea.ptr.pp_double[i][j] = v; sstate->densea.ptr.pp_double[j][i] = v; if( ae_fp_eq((double)(i),v) ) { k = 1; } else { k = 2; } sstate->absamax = ae_maxreal(sstate->absamax, vv, _state); sstate->absasum = sstate->absasum+vv*k; sstate->absasum2 = sstate->absasum2+vv*vv*k; } } } } ae_assert(sstate->akind>=0, "QQP: integrity check failed", _state); /* * Load box constraints into State structure. * * We apply transformation to variables: y=(x-x_origin)/s, * each of the constraints is appropriately shifted/scaled. 
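 * Written out (this matches the assignments below): for finite bounds
 *     BndL_mod[i] = (BndL[i]-XOrigin[i])/S[i],   BndU_mod[i] = (BndU[i]-XOrigin[i])/S[i]
 * while infinite bounds stay infinite. E.g. BndL=2, XOrigin=1, S=0.5 gives BndL_mod=2.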
*/ for(i=0; i<=n-1; i++) { sstate->havebndl.ptr.p_bool[i] = ae_isfinite(bndlc->ptr.p_double[i], _state); if( sstate->havebndl.ptr.p_bool[i] ) { sstate->bndl.ptr.p_double[i] = (bndlc->ptr.p_double[i]-xoriginc->ptr.p_double[i])/sc->ptr.p_double[i]; } else { ae_assert(ae_isneginf(bndlc->ptr.p_double[i], _state), "QQPOptimize: incorrect lower bound", _state); sstate->bndl.ptr.p_double[i] = _state->v_neginf; } sstate->havebndu.ptr.p_bool[i] = ae_isfinite(bnduc->ptr.p_double[i], _state); if( sstate->havebndu.ptr.p_bool[i] ) { sstate->bndu.ptr.p_double[i] = (bnduc->ptr.p_double[i]-xoriginc->ptr.p_double[i])/sc->ptr.p_double[i]; } else { ae_assert(ae_isposinf(bnduc->ptr.p_double[i], _state), "QQPOptimize: incorrect upper bound", _state); sstate->bndu.ptr.p_double[i] = _state->v_posinf; } } /* * Process initial point: * * set it to XS-XOriginC * * make sure that boundary constraints are preserved by transformation */ for(i=0; i<=n-1; i++) { sstate->xs.ptr.p_double[i] = (xs->ptr.p_double[i]-xoriginc->ptr.p_double[i])/sc->ptr.p_double[i]; if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_less(sstate->xs.ptr.p_double[i],sstate->bndl.ptr.p_double[i]) ) { sstate->xs.ptr.p_double[i] = sstate->bndl.ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_greater(sstate->xs.ptr.p_double[i],sstate->bndu.ptr.p_double[i]) ) { sstate->xs.ptr.p_double[i] = sstate->bndu.ptr.p_double[i]; } if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(xs->ptr.p_double[i],bndlc->ptr.p_double[i]) ) { sstate->xs.ptr.p_double[i] = sstate->bndl.ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(xs->ptr.p_double[i],bnduc->ptr.p_double[i]) ) { sstate->xs.ptr.p_double[i] = sstate->bndu.ptr.p_double[i]; } } /* * Select sparse direct solver */ if( akind==1 ) { sparsesolver = settings->sparsesolver; if( sparsesolver==0 ) { sparsesolver = 1; } if( sparseissks(&sstate->sparsea, _state) ) { sparsesolver = 2; } sparsesolver = 2; ae_assert(sparsesolver==1||sparsesolver==2, "QQPOptimize: incorrect SparseSolver", _state); } else { sparsesolver = 0; } /* * For unconstrained problems - try to use fast approach which requires * just one unregularized Cholesky decomposition for solution. If it fails, * switch to general QQP code. 
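 * A compact restatement of that fast path (it mirrors the code below, with f denoting
 * the quadratic target):
 *     factorize A = U'U by Cholesky, without regularization; on failure fall through
 *     for k=0..3:
 *         g := A*x+b,  d := -inv(A)*g   (two triangular solves with the cached factor)
 *         if f(x+d)>=f(x) then stop, else x := x+d
 * Success terminates with code 2; any failure falls back to the general solver.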
*/ problemsolved = ae_false; isconstrained = ae_false; for(i=0; i<=n-1; i++) { isconstrained = (isconstrained||sstate->havebndl.ptr.p_bool[i])||sstate->havebndu.ptr.p_bool[i]; } if( (!isconstrained&&settings->cnphase)&&akind==0 ) { rmatrixsetlengthatleast(&sstate->densez, n, n, _state); rvectorsetlengthatleast(&sstate->tmpcn, n, _state); for(i=0; i<=n-1; i++) { for(j=i; j<=n-1; j++) { sstate->densez.ptr.pp_double[i][j] = sstate->densea.ptr.pp_double[i][j]; } } inc(&sstate->repncholesky, _state); if( spdmatrixcholeskyrec(&sstate->densez, 0, n, ae_true, &sstate->tmpcn, _state) ) { ae_v_move(&sstate->xf.ptr.p_double[0], 1, &sstate->xs.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { sstate->dc.ptr.p_double[i] = (double)(0); } f0 = qqpsolver_projectedtargetfunction(sstate, &sstate->xf, &sstate->dc, 0.0, &sstate->tmpcn, &sstate->tmp1, _state); for(k=0; k<=3; k++) { rmatrixmv(n, n, &sstate->densea, 0, 0, 0, &sstate->xf, 0, &sstate->gc, 0, _state); ae_v_add(&sstate->gc.ptr.p_double[0], 1, &sstate->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { sstate->dc.ptr.p_double[i] = -sstate->gc.ptr.p_double[i]; } fblscholeskysolve(&sstate->densez, 1.0, n, ae_true, &sstate->dc, &sstate->tmpcn, _state); f1 = qqpsolver_projectedtargetfunction(sstate, &sstate->xf, &sstate->dc, 1.0, &sstate->tmpcn, &sstate->tmp1, _state); if( ae_fp_greater_eq(f1,f0) ) { break; } ae_v_add(&sstate->xf.ptr.p_double[0], 1, &sstate->dc.ptr.p_double[0], 1, ae_v_len(0,n-1)); f0 = f1; } *terminationtype = 2; problemsolved = ae_true; } } /* * Attempt to solve problem with fast approach failed, use generic QQP */ if( !problemsolved ) { /* * Prepare "active set" structure */ sassetbc(&sstate->sas, &sstate->bndl, &sstate->bndu, _state); if( !sasstartoptimization(&sstate->sas, &sstate->xs, _state) ) { *terminationtype = -3; return; } /* * Main loop. * * Following variables are used: * * GC stores current gradient (unconstrained) * * CGC stores current gradient (constrained) * * DC stores current search direction * * CGP stores constrained gradient at previous point * (zero on initial entry) * * DP stores previous search direction * (zero on initial entry) */ cgmax = settings->cgminits; sstate->repinneriterationscount = 0; sstate->repouteriterationscount = 0; for(;;) { if( settings->maxouterits>0&&sstate->repouteriterationscount>=settings->maxouterits ) { *terminationtype = 5; break; } if( sstate->repouteriterationscount>0 ) { /* * Check EpsF- and EpsX-based stopping criteria. * Because problem was already scaled, we do not scale step before checking its length. * NOTE: these checks are performed only after at least one outer iteration was made. */ if( ae_fp_greater(settings->epsf,(double)(0)) ) { /* * NOTE 1: here we rely on the fact that ProjectedTargetFunction() ignore D when Stp=0 * NOTE 2: code below handles situation when update increases function value instead * of decreasing it. 
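 *
 * The tests implemented below are
 *
 *     F(x_prev)-F(x_cur) <= EpsF*max(|F(x_prev)|,|F(x_cur)|,1)    (EpsF test)
 *     |x_cur-x_prev|     <= EpsX                                  (EpsX test)
 *
 * where |.| in the second line is the Euclidean norm of the step.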
*/ fprev = qqpsolver_projectedtargetfunction(sstate, &sstate->xp, &sstate->dc, 0.0, &sstate->tmp0, &sstate->tmp1, _state); fcur = qqpsolver_projectedtargetfunction(sstate, &sstate->sas.xc, &sstate->dc, 0.0, &sstate->tmp0, &sstate->tmp1, _state); if( ae_fp_less_eq(fprev-fcur,settings->epsf*ae_maxreal(ae_fabs(fprev, _state), ae_maxreal(ae_fabs(fcur, _state), 1.0, _state), _state)) ) { *terminationtype = 1; break; } } if( ae_fp_greater(settings->epsx,(double)(0)) ) { v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(sstate->xp.ptr.p_double[i]-sstate->sas.xc.ptr.p_double[i], _state); } if( ae_fp_less_eq(ae_sqrt(v, _state),settings->epsx) ) { *terminationtype = 2; break; } } } inc(&sstate->repouteriterationscount, _state); ae_v_move(&sstate->xp.ptr.p_double[0], 1, &sstate->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !settings->cgphase ) { cgmax = 0; } for(i=0; i<=n-1; i++) { sstate->cgp.ptr.p_double[i] = 0.0; sstate->dp.ptr.p_double[i] = 0.0; } for(cgcnt=0; cgcnt<=cgmax-1; cgcnt++) { /* * Calculate unconstrained gradient GC for "extended" QP problem * Determine active set, current constrained gradient CGC. * Check gradient-based stopping condition. * * NOTE: because problem was scaled, we do not have to apply scaling * to gradient before checking stopping condition. */ qqpsolver_targetgradient(sstate, &sstate->sas.xc, &sstate->gc, _state); sasreactivateconstraints(&sstate->sas, &sstate->gc, _state); ae_v_move(&sstate->cgc.ptr.p_double[0], 1, &sstate->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); sasconstraineddirection(&sstate->sas, &sstate->cgc, _state); v = ae_v_dotproduct(&sstate->cgc.ptr.p_double[0], 1, &sstate->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_fp_less_eq(ae_sqrt(v, _state),settings->epsg) ) { *terminationtype = 4; break; } /* * Prepare search direction DC and explore it. * * We try to use CGP/DP to prepare conjugate gradient step, * but we resort to steepest descent step (Beta=0) in case * we are at I-th boundary, but DP[I]<>0. * * Such approach allows us to ALWAYS have feasible DC, with * guaranteed compatibility with both feasible area and current * active set. * * Automatic CG reset performed every time DP is incompatible * with current active set and/or feasible area. We also * perform reset every QuickQPRestartCG iterations. */ ae_v_moveneg(&sstate->dc.ptr.p_double[0], 1, &sstate->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = 0.0; vv = 0.0; b = ae_false; for(i=0; i<=n-1; i++) { v = v+sstate->cgc.ptr.p_double[i]*sstate->cgc.ptr.p_double[i]; vv = vv+sstate->cgp.ptr.p_double[i]*sstate->cgp.ptr.p_double[i]; b = b||((sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndl.ptr.p_double[i]))&&ae_fp_neq(sstate->dp.ptr.p_double[i],(double)(0))); b = b||((sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndu.ptr.p_double[i]))&&ae_fp_neq(sstate->dp.ptr.p_double[i],(double)(0))); } b = b||ae_fp_eq(vv,(double)(0)); b = b||cgcnt%qqpsolver_quickqprestartcg==0; if( !b ) { beta = v/vv; } else { beta = 0.0; } ae_v_addd(&sstate->dc.ptr.p_double[0], 1, &sstate->dp.ptr.p_double[0], 1, ae_v_len(0,n-1), beta); sasconstraineddirection(&sstate->sas, &sstate->dc, _state); sasexploredirection(&sstate->sas, &sstate->dc, &stpmax, &cidx, &cval, _state); /* * Build quadratic model of F along descent direction: * * F(xc+alpha*D) = D2*alpha^2 + D1*alpha * * Terminate algorithm if needed. 
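 *
 * Here D1=(grad F(xc),D) and D2=0.5*D'*A*D, so for a strictly convex
 * model the unconstrained minimizer along D is Alpha=-D1/(2*D2).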
* * NOTE: we do not maintain constant term D0 */ qqpsolver_quadraticmodel(sstate, &sstate->sas.xc, &sstate->dc, &sstate->gc, &d1, &d1est, &d2, &d2est, &sstate->tmp0, _state); if( ae_fp_eq(d1,(double)(0))&&ae_fp_eq(d2,(double)(0)) ) { /* * D1 and D2 are exactly zero, success. * After this if-then we assume that D is non-zero. */ *terminationtype = 4; break; } if( d1est>=0 ) { /* * Numerical noise is too large, it means that we are close * to minimum - and that further improvement is impossible. * * After this if-then we assume that D1 is definitely negative * (even under presence of numerical errors). */ *terminationtype = 7; break; } if( d2est<=0&&cidx<0 ) { /* * Function is unbounded from below: * * D1<0 (verified by previous block) * * D2Est<=0, which means that either D2<0 - or it can not * be reliably distinguished from zero. * * step is unconstrained * * If these conditions are true, we abnormally terminate QP * algorithm with return code -4 */ *terminationtype = -4; break; } /* * Perform step along DC. * * In this block of code we maintain two step length: * * RestStp - restricted step, maximum step length along DC which does * not violate constraints * * FullStp - step length along DC which minimizes quadratic function * without taking constraints into account. If problem is * unbounded from below without constraints, FullStp is * forced to be RestStp. * * So, if function is convex (D2>0): * * FullStp = -D1/(2*D2) * * RestStp = restricted FullStp * * 0<=RestStp<=FullStp * * If function is non-convex, but bounded from below under constraints: * * RestStp = step length subject to constraints * * FullStp = RestStp * * After RestStp and FullStp are initialized, we generate several trial * steps which are different multiples of RestStp and FullStp. */ if( d2est>0 ) { ae_assert(ae_fp_less(d1,(double)(0)), "QQPOptimize: internal error", _state); fullstp = -d1/(2*d2); needact = ae_fp_greater_eq(fullstp,stpmax); if( needact ) { ae_assert(sstate->stpbuf.cnt>=3, "QQPOptimize: StpBuf overflow", _state); reststp = stpmax; stp = reststp; sstate->stpbuf.ptr.p_double[0] = reststp*4; sstate->stpbuf.ptr.p_double[1] = fullstp; sstate->stpbuf.ptr.p_double[2] = fullstp/4; stpcnt = 3; } else { reststp = fullstp; stp = fullstp; stpcnt = 0; } } else { ae_assert(cidx>=0, "QQPOptimize: internal error", _state); ae_assert(sstate->stpbuf.cnt>=2, "QQPOptimize: StpBuf overflow", _state); reststp = stpmax; fullstp = stpmax; stp = reststp; needact = ae_true; sstate->stpbuf.ptr.p_double[0] = 4*reststp; stpcnt = 1; } qqpsolver_findbeststepandmove(sstate, &sstate->sas, &sstate->dc, stp, needact, cidx, cval, &sstate->stpbuf, stpcnt, &sstate->activated, &sstate->tmp0, &sstate->tmp1, _state); /* * Update CG information. */ ae_v_move(&sstate->dp.ptr.p_double[0], 1, &sstate->dc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&sstate->cgp.ptr.p_double[0], 1, &sstate->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Update iterations counter */ sstate->repinneriterationscount = sstate->repinneriterationscount+1; } if( *terminationtype!=0 ) { break; } cgmax = settings->cgmaxits; /* * Generate YIdx - reordering of variables for constrained Newton phase. * Free variables come first, fixed are last ones. */ newtcnt = 0; for(;;) { /* * Skip iteration if constrained Newton is turned off. */ if( !settings->cnphase ) { break; } /* * At the first iteration - build Cholesky decomposition of Hessian. * At subsequent iterations - refine Hessian by adding new constraints. 
* * Loop is terminated in following cases: * * Hessian is not positive definite subject to current constraints * (termination during initial decomposition) * * there were no new constraints being activated * (termination during update) * * all constraints were activated during last step * (termination during update) * * CNMaxUpdates were performed on matrix * (termination during update) */ if( newtcnt==0 ) { /* * Perform initial Newton step. If Cholesky decomposition fails, * increase number of CG iterations to CGMaxIts - it should help * us to find set of constraints which will make matrix positive * definite. */ b = qqpsolver_cnewtonbuild(sstate, sparsesolver, &sstate->repncholesky, _state); if( b ) { cgmax = settings->cgminits; } } else { b = qqpsolver_cnewtonupdate(sstate, settings, &sstate->repncupdates, _state); } if( !b ) { break; } inc(&newtcnt, _state); /* * Calculate gradient GC. */ qqpsolver_targetgradient(sstate, &sstate->sas.xc, &sstate->gc, _state); /* * Bound-constrained Newton step */ for(i=0; i<=n-1; i++) { sstate->dc.ptr.p_double[i] = sstate->gc.ptr.p_double[i]; } if( !qqpsolver_cnewtonstep(sstate, settings, &sstate->dc, _state) ) { break; } qqpsolver_quadraticmodel(sstate, &sstate->sas.xc, &sstate->dc, &sstate->gc, &d1, &d1est, &d2, &d2est, &sstate->tmp0, _state); if( d1est>=0 ) { /* * We are close to minimum, derivative is nearly zero, break Newton iteration */ break; } if( d2est>0 ) { /* * Positive definite matrix, we can perform Newton step */ ae_assert(ae_fp_less(d1,(double)(0)), "QQPOptimize: internal error", _state); fullstp = -d1/(2*d2); sasexploredirection(&sstate->sas, &sstate->dc, &stpmax, &cidx, &cval, _state); needact = ae_fp_greater_eq(fullstp,stpmax); if( needact ) { ae_assert(sstate->stpbuf.cnt>=3, "QQPOptimize: StpBuf overflow", _state); reststp = stpmax; stp = reststp; sstate->stpbuf.ptr.p_double[0] = reststp*4; sstate->stpbuf.ptr.p_double[1] = fullstp; sstate->stpbuf.ptr.p_double[2] = fullstp/4; stpcnt = 3; } else { reststp = fullstp; stp = fullstp; stpcnt = 0; } qqpsolver_findbeststepandmove(sstate, &sstate->sas, &sstate->dc, stp, needact, cidx, cval, &sstate->stpbuf, stpcnt, &sstate->activated, &sstate->tmp0, &sstate->tmp1, _state); } else { /* * Matrix is semi-definite or indefinite, but regularized * Cholesky succeeded and gave us descent direction in DC. * * We will investigate it and try to perform descent step: * * first, we explore direction: * * if it is unbounded, we stop algorithm with * appropriate termination code -4. * * if StpMax=0, we break Newton phase and return to * CG phase - constraint geometry is complicated near * current point, so it is better to use simpler algo. * * second, we check that bounded step decreases function; * if not, we again skip to CG phase * * finally, we use FindBestStep...() function to choose * between bounded step and projection of full-length step * (latter may give additional decrease in */ sasexploredirection(&sstate->sas, &sstate->dc, &stpmax, &cidx, &cval, _state); if( cidx<0 ) { /* * Function is unbounded from below: * * D1<0 (verified by previous block) * * D2Est<=0, which means that either D2<0 - or it can not * be reliably distinguished from zero. * * step is unconstrained * * If these conditions are true, we abnormally terminate QP * algorithm with return code -4 */ *terminationtype = -4; break; } if( ae_fp_eq(stpmax,(double)(0)) ) { /* * Resort to CG phase. * Increase number of CG iterations. 
*/ cgmax = settings->cgmaxits; break; } ae_assert(ae_fp_greater(stpmax,(double)(0)), "QQPOptimize: internal error", _state); f0 = qqpsolver_projectedtargetfunction(sstate, &sstate->sas.xc, &sstate->dc, 0.0, &sstate->tmp0, &sstate->tmp1, _state); f1 = qqpsolver_projectedtargetfunction(sstate, &sstate->sas.xc, &sstate->dc, stpmax, &sstate->tmp0, &sstate->tmp1, _state); if( ae_fp_greater_eq(f1,f0) ) { /* * Descent direction does not actually decrease function value. * Resort to CG phase * Increase number of CG iterations. */ cgmax = settings->cgmaxits; break; } ae_assert(sstate->stpbuf.cnt>=3, "QQPOptimize: StpBuf overflow", _state); reststp = stpmax; stp = reststp; sstate->stpbuf.ptr.p_double[0] = reststp*4; sstate->stpbuf.ptr.p_double[1] = 1.00; sstate->stpbuf.ptr.p_double[2] = 0.25; stpcnt = 3; qqpsolver_findbeststepandmove(sstate, &sstate->sas, &sstate->dc, stp, ae_true, cidx, cval, &sstate->stpbuf, stpcnt, &sstate->activated, &sstate->tmp0, &sstate->tmp1, _state); } } if( *terminationtype!=0 ) { break; } } sasstopoptimization(&sstate->sas, _state); ae_v_move(&sstate->xf.ptr.p_double[0], 1, &sstate->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); } /* * Stop optimization and unpack results. * * Add XOriginC to XS and make sure that boundary constraints are * both (a) satisfied, (b) preserved. Former means that "shifted" * point is feasible, while latter means that point which was exactly * at the boundary before shift will be exactly at the boundary * after shift. */ for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sc->ptr.p_double[i]*sstate->xf.ptr.p_double[i]+xoriginc->ptr.p_double[i]; if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_less(xs->ptr.p_double[i],bndlc->ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndlc->ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_greater(xs->ptr.p_double[i],bnduc->ptr.p_double[i]) ) { xs->ptr.p_double[i] = bnduc->ptr.p_double[i]; } if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(sstate->xf.ptr.p_double[i],sstate->bndl.ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndlc->ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(sstate->xf.ptr.p_double[i],sstate->bndu.ptr.p_double[i]) ) { xs->ptr.p_double[i] = bnduc->ptr.p_double[i]; } } } /************************************************************************* Target function at point PROJ(X+Stp*D), where PROJ(.) is a projection into feasible set. NOTE: if Stp=0, D is not referenced at all. Thus, there is no need to fill it by some meaningful values for Stp=0. This subroutine uses temporary buffers Tmp0/1, which are automatically resized if needed. 
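
The projection is a simple componentwise clamp to the box constraints; a
minimal sketch of the evaluation (illustrative only, simplified names):

    v = x[i]+stp*d[i];
    if( have_bndl[i] && v<bndl[i] ) v = bndl[i];
    if( have_bndu[i] && v>bndu[i] ) v = bndu[i];

after which 0.5*v'*A*v + b'*v is computed at the projected point.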
-- ALGLIB -- Copyright 21.12.2013 by Bochkanov Sergey *************************************************************************/ static double qqpsolver_projectedtargetfunction(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* d, double stp, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, ae_state *_state) { ae_int_t n; ae_int_t i; double v; double result; n = sstate->n; rvectorsetlengthatleast(tmp0, n, _state); rvectorsetlengthatleast(tmp1, n, _state); /* * Calculate projected point */ for(i=0; i<=n-1; i++) { if( ae_fp_neq(stp,(double)(0)) ) { v = x->ptr.p_double[i]+stp*d->ptr.p_double[i]; } else { v = x->ptr.p_double[i]; } if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_less(v,sstate->bndl.ptr.p_double[i]) ) { v = sstate->bndl.ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_greater(v,sstate->bndu.ptr.p_double[i]) ) { v = sstate->bndu.ptr.p_double[i]; } tmp0->ptr.p_double[i] = v; } /* * Function value at the Tmp0: * * f(x) = 0.5*x'*A*x + b'*x */ result = 0.0; for(i=0; i<=n-1; i++) { result = result+sstate->b.ptr.p_double[i]*tmp0->ptr.p_double[i]; } if( sstate->akind==0 ) { /* * Dense matrix A */ result = result+0.5*rmatrixsyvmv(n, &sstate->densea, 0, 0, ae_true, tmp0, 0, tmp1, _state); } else { /* * sparse matrix A */ ae_assert(sstate->akind==1, "QQPOptimize: unexpected AKind in ProjectedTargetFunction", _state); result = result+0.5*sparsevsmv(&sstate->sparsea, sstate->sparseupper, tmp0, _state); } return result; } /************************************************************************* Gradient of the target function: f(x) = 0.5*x'*A*x + b'*x which is equal to grad = A*x + b Here: * x is array[N] * A is array[N,N] * b is array[N] INPUT PARAMETERS: SState - structure which stores function terms (not modified) X - location G - possibly preallocated buffer OUTPUT PARAMETERS: G - array[N], gradient -- ALGLIB -- Copyright 21.12.2013 by Bochkanov Sergey *************************************************************************/ static void qqpsolver_targetgradient(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t n; n = sstate->n; rvectorsetlengthatleast(g, n, _state); if( sstate->akind==0 ) { /* * Dense matrix A */ rmatrixsymv(n, 1.0, &sstate->densea, 0, 0, ae_true, x, 0, 0.0, g, 0, _state); } else { /* * Sparse matrix A */ ae_assert(sstate->akind==1, "QQPOptimize: unexpected AKind in TargetGradient", _state); sparsesmv(&sstate->sparsea, sstate->sparseupper, x, g, _state); } ae_v_add(&g->ptr.p_double[0], 1, &sstate->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); } /************************************************************************* First and second derivatives of the "extended" target function along specified direction. 
Target function is called "extended" because of additional slack variables and has form: f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b) with gradient grad = A*x + b + penaltyfactor*C'*(C*x-b) Quadratic model has form F(x0+alpha*D) = D2*alpha^2 + D1*alpha INPUT PARAMETERS: SState - structure which is used to obtain quadratic term of the model X - current point, array[N] D - direction across which derivatives are calculated, array[N] G - gradient at current point (pre-calculated by caller), array[N] OUTPUT PARAMETERS: D1 - linear coefficient D1Est - estimate of D1 sign, accounting for possible numerical errors: * >0 means "almost surely positive" * <0 means "almost surely negative" * =0 means "pessimistic estimate of numerical errors in D1 is larger than magnitude of D1 itself; it is impossible to reliably distinguish D1 from zero". D2 - quadratic coefficient D2Est - estimate of D2 sign, accounting for possible numerical errors: * >0 means "almost surely positive" * <0 means "almost surely negative" * =0 means "pessimistic estimate of numerical errors in D2 is larger than magnitude of D2 itself; it is impossible to reliably distinguish D2 from zero". -- ALGLIB -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ static void qqpsolver_quadraticmodel(qqpbuffers* sstate, /* Real */ ae_vector* x, /* Real */ ae_vector* d, /* Real */ ae_vector* g, double* d1, ae_int_t* d1est, double* d2, ae_int_t* d2est, /* Real */ ae_vector* tmp0, ae_state *_state) { ae_int_t n; ae_int_t i; double v; double mx; double mb; double md; *d1 = 0; *d1est = 0; *d2 = 0; *d2est = 0; n = sstate->n; /* * Maximums */ mx = 0.0; md = 0.0; mb = 0.0; for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(x->ptr.p_double[i], _state), _state); md = ae_maxreal(md, ae_fabs(d->ptr.p_double[i], _state), _state); } for(i=0; i<=n-1; i++) { mb = ae_maxreal(mb, ae_fabs(sstate->b.ptr.p_double[i], _state), _state); } /* * D2 */ if( sstate->akind==0 ) { /* * Dense matrix A */ *d2 = 0.5*rmatrixsyvmv(n, &sstate->densea, 0, 0, ae_true, d, 0, tmp0, _state); } else { /* * Sparse matrix A */ ae_assert(sstate->akind==1, "QQPOptimize: unexpected AKind in TargetGradient", _state); *d2 = 0.5*sparsevsmv(&sstate->sparsea, sstate->sparseupper, d, _state); } v = ae_v_dotproduct(&d->ptr.p_double[0], 1, &g->ptr.p_double[0], 1, ae_v_len(0,n-1)); *d1 = v; /* * Error estimates */ estimateparabolicmodel(sstate->absasum, sstate->absasum2, mx, mb, md, *d1, *d2, d1est, d2est, _state); } /************************************************************************* This function accepts quadratic model of the form f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b) and list of possible steps along direction D. It chooses best step (one which achieves minimum value of the target function) and moves current point (given by SAS object) to the new location. Step is bounded subject to boundary constraints. Candidate steps are divided into two groups: * "default" step, which is always performed when no candidate steps LONGER THAN THE DEFAULT ONE is given. This candidate MUST reduce target function value; it is responsibility of caller to provide default candidate which reduces target function. * "additional candidates", which may be shorter or longer than the default step. Candidates which are shorter that the default step are ignored; candidates which are longer than the "default" step are tested. 
The idea is that we ALWAYS try "default" step, and it is responsibility of the caller to provide us with something which is worth trying. This step may activate some constraint - that's why we stopped at "default" step size. However, we may also try longer steps which may activate additional constraints and further reduce function value. INPUT PARAMETERS: SState - structure which stores model SAS - active set structure which stores current point in SAS.XC D - direction for step Stp - step length for "default" candidate NeedAct - whether default candidate activates some constraint; if NeedAct is True, constraint given by CIdc/CVal is GUARANTEED to be activated in the final point. CIdx - if NeedAct is True, stores index of the constraint to activate CVal - if NeedAct is True, stores constrained value; SAS.XC[CIdx] is forced to be equal to CVal. AddSteps- array[AddStepsCnt] of additional steps: * AddSteps[]<=Stp are ignored * AddSteps[]>Stp are tried Activated- possibly preallocated buffer; previously allocated memory will be reused. Tmp0/1 - possibly preallocated buffers; previously allocated memory will be reused. OUTPUT PARAMETERS: SAS - SAS.XC is set to new point; if there was a constraint specified by NeedAct/CIdx/CVal, it will be activated (other constraints may be activated too, but this one is guaranteed to be active in the final point). Activated- elements of this array are set to True, if I-th constraint as inactive at previous point, but become active in the new one. Situations when we deactivate xi>=0 and activate xi<=1 are considered as activation of previously inactive constraint -- ALGLIB -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ static void qqpsolver_findbeststepandmove(qqpbuffers* sstate, sactiveset* sas, /* Real */ ae_vector* d, double stp, ae_bool needact, ae_int_t cidx, double cval, /* Real */ ae_vector* addsteps, ae_int_t addstepscnt, /* Boolean */ ae_vector* activated, /* Real */ ae_vector* tmp0, /* Real */ ae_vector* tmp1, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t k; double v; double stpbest; double fbest; double fcand; n = sstate->n; rvectorsetlengthatleast(tmp0, n, _state); bvectorsetlengthatleast(activated, n, _state); /* * Calculate initial step, store to Tmp0 * * NOTE: Tmp0 is guaranteed to be feasible w.r.t. boundary constraints */ for(i=0; i<=n-1; i++) { v = sas->xc.ptr.p_double[i]+stp*d->ptr.p_double[i]; if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_less(v,sstate->bndl.ptr.p_double[i]) ) { v = sstate->bndl.ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_greater(v,sstate->bndu.ptr.p_double[i]) ) { v = sstate->bndu.ptr.p_double[i]; } tmp0->ptr.p_double[i] = v; } if( needact ) { tmp0->ptr.p_double[cidx] = cval; } /* * Try additional steps, if AddStepsCnt>0 */ if( addstepscnt>0 ) { /* * Find best step */ stpbest = stp; fbest = qqpsolver_projectedtargetfunction(sstate, &sas->xc, d, stpbest, tmp0, tmp1, _state); for(k=0; k<=addstepscnt-1; k++) { if( ae_fp_greater(addsteps->ptr.p_double[k],stp) ) { fcand = qqpsolver_projectedtargetfunction(sstate, &sas->xc, d, addsteps->ptr.p_double[k], tmp0, tmp1, _state); if( ae_fp_less(fcand,fbest) ) { fbest = fcand; stpbest = addsteps->ptr.p_double[k]; } } } /* * Prepare best step * * NOTE: because only AddSteps[]>Stp were checked, * this step will activate constraint CIdx. 
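 *
 * NOTE: candidate steps are compared by their PROJECTED function values
 *       (see ProjectedTargetFunction above), so a candidate longer than
 *       the restricted step is evaluated at the clamped point and is
 *       accepted only if it further reduces the target.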
*/ for(i=0; i<=n-1; i++) { v = sas->xc.ptr.p_double[i]+stpbest*d->ptr.p_double[i]; if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_less(v,sstate->bndl.ptr.p_double[i]) ) { v = sstate->bndl.ptr.p_double[i]; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_greater(v,sstate->bndu.ptr.p_double[i]) ) { v = sstate->bndu.ptr.p_double[i]; } tmp0->ptr.p_double[i] = v; } if( needact ) { tmp0->ptr.p_double[cidx] = cval; } } /* * Fill Activated array by information about activated constraints. * Perform step */ for(i=0; i<=n-1; i++) { activated->ptr.p_bool[i] = ae_false; v = tmp0->ptr.p_double[i]; if( ae_fp_eq(v,sas->xc.ptr.p_double[i]) ) { continue; } if( sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(v,sstate->bndl.ptr.p_double[i]) ) { activated->ptr.p_bool[i] = ae_true; } if( sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(v,sstate->bndu.ptr.p_double[i]) ) { activated->ptr.p_bool[i] = ae_true; } } sasmoveto(sas, tmp0, needact, cidx, cval, _state); } /************************************************************************* This function prepares data for constrained Newton step for penalized quadratic model of the form f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b) where A can be dense or sparse, and model is considered subject to equality constraints specified by SState.SAS.XC object. Constraint is considered active if XC[i] is exactly BndL[i] or BndU[i], i.e. we ignore internal list of constraints monitored by SAS object. Our own set of constraints includes all constraints stored by SAS, but also may include some constraints which are inactive in SAS. "Preparation" means that Cholesky decomposition of the effective system matrix is performed, and we can perform constrained Newton step. This function works as black box. It uses fields of SState which are marked as "Variables for constrained Newton phase", and only this function and its friends know about these variables. Everyone else should use: * CNewtonBuild() to prepare initial Cholesky decomposition for step * CNewtonStep() to perform constrained Newton step * CNewtonUpdate() to update Cholesky matrix after point was moved and constraints were updated. In some cases it is possible to efficiently re-calculate Cholesky decomposition if you know which constraints were activated. If efficient re-calculation is impossible, this function returns False. INPUT PARAMETERS: SState - structure which stores model and temporaries for CN phase; in particular, SAS.XC stores current point. SparseSolver-which sparse solver to use for sparse model; ignored for dense QP. Can be: * 2 - SKS-based Cholesky NCholesky- counter which is incremented after Cholesky (successful or failed one) OUTPUT PARAMETERS: NCholesky- possibly updated counter RESULT: True, if Cholesky decomposition was successfully performed. False, if a) matrix was semi-definite or indefinite, or b) particular combination of matrix type (sparse) and constraints (general linear) is not supported. NOTE: this function may routinely return False, for indefinite matrices or for sparse problems with general linear constraints. You should be able to handle such situations. 
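
NOTE: in the dense case the free-variable block Z of the Hessian is mildly
      regularized before factorization: each diagonal entry is increased by
      QQPSolver_RegZ times the sum of absolute values in its row/column
      (or by QQPSolver_RegZ if that sum is zero), which keeps the Cholesky
      factorization well-defined for barely positive definite problems.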
-- ALGLIB -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ static ae_bool qqpsolver_cnewtonbuild(qqpbuffers* sstate, ae_int_t sparsesolver, ae_int_t* ncholesky, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; ae_int_t k; double v; ae_bool b; ae_int_t ridx0; ae_int_t ridx1; ae_int_t nfree; ae_bool result; result = ae_false; /* * Fetch often used fields */ n = sstate->n; /* * 1. Set CNModelAge to zero * 2. Generate YIdx - reordering of variables such that free variables * come first and are ordered by ascending, fixed are last ones and * have no particular ordering. * * This step is same for dense and sparse problems. */ sstate->cnmodelage = 0; ivectorsetlengthatleast(&sstate->yidx, n, _state); ridx0 = 0; ridx1 = n-1; for(i=0; i<=n-1; i++) { sstate->yidx.ptr.p_int[i] = -1; } for(i=0; i<=n-1; i++) { ae_assert(!sstate->havebndl.ptr.p_bool[i]||ae_fp_greater_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndl.ptr.p_double[i]), "CNewtonBuild: internal error", _state); ae_assert(!sstate->havebndu.ptr.p_bool[i]||ae_fp_less_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndu.ptr.p_double[i]), "CNewtonBuild: internal error", _state); b = ae_false; b = b||(sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndl.ptr.p_double[i])); b = b||(sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndu.ptr.p_double[i])); if( b ) { sstate->yidx.ptr.p_int[ridx1] = i; ridx1 = ridx1-1; } else { sstate->yidx.ptr.p_int[ridx0] = i; ridx0 = ridx0+1; } } ae_assert(ridx0==ridx1+1, "CNewtonBuild: internal error", _state); nfree = ridx0; sstate->nfree = nfree; if( nfree==0 ) { return result; } /* * Constrained Newton matrix: dense version */ if( sstate->akind==0 ) { rmatrixsetlengthatleast(&sstate->densez, n, n, _state); rvectorsetlengthatleast(&sstate->tmpcn, n, _state); for(i=0; i<=n-1; i++) { for(j=i; j<=n-1; j++) { sstate->densez.ptr.pp_double[i][j] = sstate->densea.ptr.pp_double[i][j]; } } for(i=1; i<=nfree-1; i++) { ae_assert(sstate->yidx.ptr.p_int[i]>sstate->yidx.ptr.p_int[i-1], "CNewtonBuild: integrity check failed", _state); } for(i=0; i<=nfree-1; i++) { k = sstate->yidx.ptr.p_int[i]; for(j=i; j<=nfree-1; j++) { sstate->densez.ptr.pp_double[i][j] = sstate->densez.ptr.pp_double[k][sstate->yidx.ptr.p_int[j]]; } } rvectorsetlengthatleast(&sstate->regdiag, n, _state); for(i=0; i<=nfree-1; i++) { v = 0.0; for(j=0; j<=i-1; j++) { v = v+ae_fabs(sstate->densez.ptr.pp_double[j][i], _state); } for(j=i; j<=nfree-1; j++) { v = v+ae_fabs(sstate->densez.ptr.pp_double[i][j], _state); } if( ae_fp_eq(v,(double)(0)) ) { v = 1.0; } sstate->regdiag.ptr.p_double[i] = qqpsolver_regz*v; } for(i=0; i<=nfree-1; i++) { sstate->densez.ptr.pp_double[i][i] = sstate->densez.ptr.pp_double[i][i]+sstate->regdiag.ptr.p_double[i]; } inc(ncholesky, _state); if( !spdmatrixcholeskyrec(&sstate->densez, 0, nfree, ae_true, &sstate->tmpcn, _state) ) { return result; } for(i=nfree-1; i>=0; i--) { ae_v_move(&sstate->tmpcn.ptr.p_double[i], 1, &sstate->densez.ptr.pp_double[i][i], 1, ae_v_len(i,nfree-1)); k = sstate->yidx.ptr.p_int[i]; for(j=k; j<=n-1; j++) { sstate->densez.ptr.pp_double[k][j] = (double)(0); } for(j=i; j<=nfree-1; j++) { sstate->densez.ptr.pp_double[k][sstate->yidx.ptr.p_int[j]] = sstate->tmpcn.ptr.p_double[j]; } } for(i=nfree; i<=n-1; i++) { k = sstate->yidx.ptr.p_int[i]; sstate->densez.ptr.pp_double[k][k] = 1.0; for(j=k+1; j<=n-1; j++) { sstate->densez.ptr.pp_double[k][j] = (double)(0); } } result = ae_true; return 
result; } /* * Constrained Newton matrix: sparse version */ if( sstate->akind==1 ) { ae_assert(sparsesolver==2, "CNewtonBuild: internal error", _state); /* * Copy sparse A to Z and fill rows/columns corresponding to active * constraints by zeros. Diagonal elements corresponding to active * constraints are filled by unit values. */ sparsecopytosksbuf(&sstate->sparsea, &sstate->sparsecca, _state); rvectorsetlengthatleast(&sstate->tmpcn, n, _state); for(i=0; i<=n-1; i++) { sstate->tmpcn.ptr.p_double[i] = (double)(0); } for(i=nfree; i<=n-1; i++) { sstate->tmpcn.ptr.p_double[sstate->yidx.ptr.p_int[i]] = (double)(1); } for(i=0; i<=n-1; i++) { k = sstate->sparsecca.ridx.ptr.p_int[i]; for(j=i-sstate->sparsecca.didx.ptr.p_int[i]; j<=i; j++) { if( ae_fp_neq(sstate->tmpcn.ptr.p_double[i],(double)(0))||ae_fp_neq(sstate->tmpcn.ptr.p_double[j],(double)(0)) ) { /* * I-th or J-th variable is in active set (constrained) */ if( i==j ) { sstate->sparsecca.vals.ptr.p_double[k] = 1.0; } else { sstate->sparsecca.vals.ptr.p_double[k] = 0.0; } } k = k+1; } } /* * Perform sparse Cholesky */ inc(ncholesky, _state); if( !sparsecholeskyskyline(&sstate->sparsecca, n, sstate->sparseupper, _state) ) { return result; } result = ae_true; return result; } /* * Unexpected :) */ ae_assert(ae_false, "CNewtonBuild: internal error", _state); return result; } /************************************************************************* This function updates equality-constrained Cholesky matrix after activation of the new equality constraints. Matrix being updated is quadratic term of the function below f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b) where A can be dense or sparse. This function uses YIdx[] array (set by CNewtonBuild() function) to distinguish between active and inactive constraints. This function works as black box. It uses fields of SState which are marked as "Variables for constrained Newton phase", and only this function and its friends know about these variables. Everyone else should use: * CNewtonBuild() to prepare initial Cholesky decomposition for step * CNewtonStep() to perform constrained Newton step * CNewtonUpdate() to update Cholesky matrix after point was moved and constraints were updated. In some cases it is possible to efficiently re-calculate Cholesky decomposition if you know which constraints were activated. If efficient re-calculation is impossible, this function returns False. INPUT PARAMETERS: SState - structure which stores model and temporaries for CN phase; in particular, SAS.XC stores current point. Settings - QQPSettings object which was initialized by appropriate construction function. NCUpdates- counter which is incremented after each update (one update means one variable being fixed) OUTPUT PARAMETERS: NCUpdates- possibly updated counter RESULT: True, if Cholesky decomposition was successfully performed. False, if a) model age was too high, or b) particular combination of matrix type (sparse) and constraints (general linear) is not supported NOTE: this function may routinely return False. You should be able to handle such situations. 
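
In the dense case the update "fixes" the newly constrained variables in the
existing factor via SPDMatrixCholeskyUpdateFixBuf(), which is much cheaper
than recomputing the factorization from scratch; CNModelAge grows by the
number of variables fixed, and once CNModelAge plus the number of variables
to fix exceeds CNMaxUpdates the update is refused and a fresh call to
CNewtonBuild() is required.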
-- ALGLIB -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ static ae_bool qqpsolver_cnewtonupdate(qqpbuffers* sstate, qqpsettings* settings, ae_int_t* ncupdates, ae_state *_state) { ae_int_t n; ae_int_t nfree; ae_int_t ntofix; ae_bool b; ae_int_t ridx0; ae_int_t ridx1; ae_int_t i; ae_int_t k; ae_bool result; result = ae_false; /* * Cholesky updates for sparse problems are not supported */ if( sstate->akind==1 ) { return result; } /* * Fetch often used fields */ n = sstate->n; nfree = sstate->nfree; /* * Determine variables to fix and move them to YIdx[NFree-NToFix:NFree-1] * Exit if CNModelAge increased too much. */ ivectorsetlengthatleast(&sstate->tmpcni, n, _state); ridx0 = 0; ridx1 = nfree-1; for(i=0; i<=nfree-1; i++) { sstate->tmpcni.ptr.p_int[i] = -1; } for(k=0; k<=nfree-1; k++) { i = sstate->yidx.ptr.p_int[k]; ae_assert(!sstate->havebndl.ptr.p_bool[i]||ae_fp_greater_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndl.ptr.p_double[i]), "CNewtonUpdate: internal error", _state); ae_assert(!sstate->havebndu.ptr.p_bool[i]||ae_fp_less_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndu.ptr.p_double[i]), "CNewtonUpdate: internal error", _state); b = ae_false; b = b||(sstate->havebndl.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndl.ptr.p_double[i])); b = b||(sstate->havebndu.ptr.p_bool[i]&&ae_fp_eq(sstate->sas.xc.ptr.p_double[i],sstate->bndu.ptr.p_double[i])); if( b ) { sstate->tmpcni.ptr.p_int[ridx1] = i; ridx1 = ridx1-1; } else { sstate->tmpcni.ptr.p_int[ridx0] = i; ridx0 = ridx0+1; } } ae_assert(ridx0==ridx1+1, "CNewtonUpdate: internal error", _state); ntofix = nfree-ridx0; if( ntofix==0||ntofix==nfree ) { return result; } if( sstate->cnmodelage+ntofix>settings->cnmaxupdates ) { return result; } for(i=0; i<=nfree-1; i++) { sstate->yidx.ptr.p_int[i] = sstate->tmpcni.ptr.p_int[i]; } /* * Constrained Newton matrix: dense version. */ if( sstate->akind==0 ) { /* * Update Cholesky matrix with SPDMatrixCholeskyUpdateFixBuf() */ bvectorsetlengthatleast(&sstate->tmpcnb, n, _state); for(i=0; i<=n-1; i++) { sstate->tmpcnb.ptr.p_bool[i] = ae_false; } for(i=nfree-ntofix; i<=nfree-1; i++) { sstate->tmpcnb.ptr.p_bool[sstate->yidx.ptr.p_int[i]] = ae_true; } spdmatrixcholeskyupdatefixbuf(&sstate->densez, n, ae_true, &sstate->tmpcnb, &sstate->tmpcn, _state); /* * Update information stored in State and exit */ sstate->nfree = nfree-ntofix; sstate->cnmodelage = sstate->cnmodelage+ntofix; *ncupdates = *ncupdates+ntofix; result = ae_true; return result; } /* * Unexpected :) */ ae_assert(ae_false, "CNewtonUpdate: internal error", _state); return result; } /************************************************************************* This function prepares equality-constrained Newton step using previously calculated constrained Cholesky matrix of the problem f(x) = 0.5*x'*A*x + b'*x + penaltyfactor*0.5*(C*x-b)'*(C*x-b) where A can be dense or sparse. As input, this function accepts gradient at the current location. As output, it returns step vector (replaces gradient). This function works as black box. It uses fields of SState which are marked as "Variables for constrained Newton phase", and only this function and its friends know about these variables. Everyone else should use: * CNewtonBuild() to prepare initial Cholesky decomposition for step * CNewtonStep() to perform constrained Newton step * CNewtonUpdate() to update Cholesky matrix after point was moved and constraints were updated. 
In some cases it is possible to efficiently re-calculate Cholesky decomposition if you know which constraints were activated. If efficient re-calculation is impossible, this function returns False. INPUT PARAMETERS: SState - structure which stores model and temporaries for CN phase; in particular, SAS.XC stores current point. Settings - QQPSettings object which was initialized by appropriate construction function. GC - array[N], gradient of the target function OUTPUT PARAMETERS: GC - array[N], step vector (on success) RESULT: True, if step was successfully calculated. False, if step calculation failed: a) gradient was exactly zero, b) gradient norm was smaller than EpsG (stopping condition) c) all variables were equality-constrained NOTE: this function may routinely return False. You should be able to handle such situations. -- ALGLIB -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ static ae_bool qqpsolver_cnewtonstep(qqpbuffers* sstate, qqpsettings* settings, /* Real */ ae_vector* gc, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t nfree; double v; ae_bool result; result = ae_false; n = sstate->n; nfree = sstate->nfree; for(i=nfree; i<=n-1; i++) { gc->ptr.p_double[sstate->yidx.ptr.p_int[i]] = 0.0; } v = ae_v_dotproduct(&gc->ptr.p_double[0], 1, &gc->ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_fp_less_eq(ae_sqrt(v, _state),settings->epsg) ) { return result; } for(i=0; i<=n-1; i++) { gc->ptr.p_double[i] = -gc->ptr.p_double[i]; } if( sstate->akind==0 ) { /* * Dense Newton step. * Use straightforward Cholesky solver. */ fblscholeskysolve(&sstate->densez, 1.0, n, ae_true, gc, &sstate->tmpcn, _state); result = ae_true; return result; } if( sstate->akind==1 ) { /* * Sparse Newton step. * * We have T*T' = L*L' = U'*U (depending on specific triangle stored in SparseCCA). 
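 *
 * so the Newton system is solved with two triangular solves against the
 * stored factor: for an upper factor U we solve U'*y = -g and then
 * U*d = y (the 1/0 arguments of sparsetrsv() below select the transposed
 * and the plain solve), and analogously L then L' for a lower factor.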
*/ if( sstate->sparseupper ) { sparsetrsv(&sstate->sparsecca, sstate->sparseupper, ae_false, 1, gc, _state); sparsetrsv(&sstate->sparsecca, sstate->sparseupper, ae_false, 0, gc, _state); } else { sparsetrsv(&sstate->sparsecca, sstate->sparseupper, ae_false, 0, gc, _state); sparsetrsv(&sstate->sparsecca, sstate->sparseupper, ae_false, 1, gc, _state); } result = ae_true; return result; } ae_assert(ae_false, "CNewtonStep: internal error", _state); return result; } void _qqpsettings_init(void* _p, ae_state *_state, ae_bool make_automatic) { qqpsettings *p = (qqpsettings*)_p; ae_touch_ptr((void*)p); } void _qqpsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qqpsettings *dst = (qqpsettings*)_dst; qqpsettings *src = (qqpsettings*)_src; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxouterits = src->maxouterits; dst->cgphase = src->cgphase; dst->cnphase = src->cnphase; dst->cgminits = src->cgminits; dst->cgmaxits = src->cgmaxits; dst->cnmaxupdates = src->cnmaxupdates; dst->sparsesolver = src->sparsesolver; } void _qqpsettings_clear(void* _p) { qqpsettings *p = (qqpsettings*)_p; ae_touch_ptr((void*)p); } void _qqpsettings_destroy(void* _p) { qqpsettings *p = (qqpsettings*)_p; ae_touch_ptr((void*)p); } void _qqpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) { qqpbuffers *p = (qqpbuffers*)_p; ae_touch_ptr((void*)p); ae_matrix_init(&p->densea, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsea, _state, make_automatic); ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->xs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xf, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cgc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cgp, 0, DT_REAL, _state, make_automatic); _sactiveset_init(&p->sas, _state, make_automatic); ae_vector_init(&p->activated, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->densez, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsecca, _state, make_automatic); ae_vector_init(&p->yidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->regdiag, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->regx0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpcn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpcni, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->tmpcnb, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stpbuf, 0, DT_REAL, _state, make_automatic); _sparsebuffers_init(&p->sbuf, _state, make_automatic); } void _qqpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qqpbuffers *dst = (qqpbuffers*)_dst; qqpbuffers *src = (qqpbuffers*)_src; dst->n = src->n; dst->akind = src->akind; ae_matrix_init_copy(&dst->densea, &src->densea, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsea, &src->sparsea, _state, 
make_automatic); dst->sparseupper = src->sparseupper; dst->absamax = src->absamax; dst->absasum = src->absasum; dst->absasum2 = src->absasum2; ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic); ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic); ae_vector_init_copy(&dst->xs, &src->xs, _state, make_automatic); ae_vector_init_copy(&dst->xf, &src->xf, _state, make_automatic); ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic); ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic); ae_vector_init_copy(&dst->dc, &src->dc, _state, make_automatic); ae_vector_init_copy(&dst->dp, &src->dp, _state, make_automatic); ae_vector_init_copy(&dst->cgc, &src->cgc, _state, make_automatic); ae_vector_init_copy(&dst->cgp, &src->cgp, _state, make_automatic); _sactiveset_init_copy(&dst->sas, &src->sas, _state, make_automatic); ae_vector_init_copy(&dst->activated, &src->activated, _state, make_automatic); dst->nfree = src->nfree; dst->cnmodelage = src->cnmodelage; ae_matrix_init_copy(&dst->densez, &src->densez, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsecca, &src->sparsecca, _state, make_automatic); ae_vector_init_copy(&dst->yidx, &src->yidx, _state, make_automatic); ae_vector_init_copy(&dst->regdiag, &src->regdiag, _state, make_automatic); ae_vector_init_copy(&dst->regx0, &src->regx0, _state, make_automatic); ae_vector_init_copy(&dst->tmpcn, &src->tmpcn, _state, make_automatic); ae_vector_init_copy(&dst->tmpcni, &src->tmpcni, _state, make_automatic); ae_vector_init_copy(&dst->tmpcnb, &src->tmpcnb, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->stpbuf, &src->stpbuf, _state, make_automatic); _sparsebuffers_init_copy(&dst->sbuf, &src->sbuf, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repncholesky = src->repncholesky; dst->repncupdates = src->repncupdates; } void _qqpbuffers_clear(void* _p) { qqpbuffers *p = (qqpbuffers*)_p; ae_touch_ptr((void*)p); ae_matrix_clear(&p->densea); _sparsematrix_clear(&p->sparsea); ae_vector_clear(&p->b); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->havebndl); ae_vector_clear(&p->havebndu); ae_vector_clear(&p->xs); ae_vector_clear(&p->xf); ae_vector_clear(&p->gc); ae_vector_clear(&p->xp); ae_vector_clear(&p->dc); ae_vector_clear(&p->dp); ae_vector_clear(&p->cgc); ae_vector_clear(&p->cgp); _sactiveset_clear(&p->sas); ae_vector_clear(&p->activated); ae_matrix_clear(&p->densez); _sparsematrix_clear(&p->sparsecca); ae_vector_clear(&p->yidx); ae_vector_clear(&p->regdiag); ae_vector_clear(&p->regx0); ae_vector_clear(&p->tmpcn); ae_vector_clear(&p->tmpcni); ae_vector_clear(&p->tmpcnb); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->stpbuf); _sparsebuffers_clear(&p->sbuf); } void _qqpbuffers_destroy(void* _p) { qqpbuffers *p = (qqpbuffers*)_p; ae_touch_ptr((void*)p); ae_matrix_destroy(&p->densea); _sparsematrix_destroy(&p->sparsea); ae_vector_destroy(&p->b); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->havebndl); ae_vector_destroy(&p->havebndu); ae_vector_destroy(&p->xs); 
ae_vector_destroy(&p->xf); ae_vector_destroy(&p->gc); ae_vector_destroy(&p->xp); ae_vector_destroy(&p->dc); ae_vector_destroy(&p->dp); ae_vector_destroy(&p->cgc); ae_vector_destroy(&p->cgp); _sactiveset_destroy(&p->sas); ae_vector_destroy(&p->activated); ae_matrix_destroy(&p->densez); _sparsematrix_destroy(&p->sparsecca); ae_vector_destroy(&p->yidx); ae_vector_destroy(&p->regdiag); ae_vector_destroy(&p->regx0); ae_vector_destroy(&p->tmpcn); ae_vector_destroy(&p->tmpcni); ae_vector_destroy(&p->tmpcnb); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->stpbuf); _sparsebuffers_destroy(&p->sbuf); } #endif #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This function generates scaled (by S) and shifted (by XC) reformulation of the box constraints. INPUT PARAMETERS: S - scale vector, array[N]: * I-th element contains scale of I-th variable, * SC[I]>0 XOrigin - origin term, array[N]. Can be zero. BndL - raw lower bounds, array[N] BndU - raw upper bounds, array[N] N - number of variables. OUTPUT PARAMETERS: BndL - replaced by scaled/shifted lower bounds, array[N] BndU - replaced by scaled/shifted upper bounds, array[N] -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void scaleshiftbcinplace(/* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_int_t n, ae_state *_state) { ae_int_t i; ae_bool hasbndl; ae_bool hasbndu; for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state)&&s->ptr.p_double[i]>0.0, "ScaleShiftBC: S[i] is nonpositive", _state); ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "ScaleShiftBC: BndL[i] is +INF or NAN", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "ScaleShiftBC: BndU[i] is -INF or NAN", _state); hasbndl = ae_isfinite(bndl->ptr.p_double[i], _state); hasbndu = ae_isfinite(bndu->ptr.p_double[i], _state); if( (hasbndl&&hasbndu)&&ae_fp_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { /* * Make sure that BndL[I]=BndU[I] bit-to-bit * even with CRAZY optimizing compiler. */ bndu->ptr.p_double[i] = (bndu->ptr.p_double[i]-xorigin->ptr.p_double[i])/s->ptr.p_double[i]; bndl->ptr.p_double[i] = bndu->ptr.p_double[i]; continue; } if( hasbndl ) { bndl->ptr.p_double[i] = (bndl->ptr.p_double[i]-xorigin->ptr.p_double[i])/s->ptr.p_double[i]; } if( hasbndu ) { bndu->ptr.p_double[i] = (bndu->ptr.p_double[i]-xorigin->ptr.p_double[i])/s->ptr.p_double[i]; } } } /************************************************************************* This function generates scaled (by S) and shifted (by XC) reformulation of two-sided "lower-bound/range" constraints stored in dense format. INPUT PARAMETERS: S - scale vector, array[N]: * I-th element contains scale of I-th variable, * SC[I]>0 XOrigin - origin term, array[N]. Can be zero. N - number of variables. 
DenseA - array[M,N], constraint matrix AB - lower bounds for constraints, always present and finite, array[M] AR - ranges for constraints, can be zero (equality constraint), positive (range constraint) or +INF (lower bound constraint), array[M] M - constraint count, M>=0 OUTPUT PARAMETERS: DenseA - replaced by scaled/shifted constraints, array[M,N] AB - replaced by scaled/shifted lower bounds, array[M] AR - replaced by scaled/shifted ranges, array[M] -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void scaleshiftdensebrlcinplace(/* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, /* Real */ ae_matrix* densea, /* Real */ ae_vector* ab, /* Real */ ae_vector* ar, ae_int_t m, ae_state *_state) { ae_int_t i; ae_int_t j; double v; double vv; for(i=0; i<=m-1; i++) { /* * Scale/shift constraint; shift its lower bound * * NOTE: range is NOT scaled or shifted */ v = 0.0; for(j=0; j<=n-1; j++) { vv = densea->ptr.pp_double[i][j]; v = v+vv*xorigin->ptr.p_double[j]; densea->ptr.pp_double[i][j] = vv*s->ptr.p_double[j]; } ab->ptr.p_double[i] = ab->ptr.p_double[i]-v; } } /************************************************************************* This function generates scaled (by S) and shifted (by XC) reformulation of two-sided "lower-bound/range" constraints stored in dense format. INPUT PARAMETERS: S - scale vector, array[N]: * I-th element contains scale of I-th variable, * SC[I]>0 XOrigin - origin term, array[N]. Can be zero. N - number of variables. SparseA - sparse MSparse*N constraint matrix in CRS format; ignored if MSparse=0. MSparse - dense constraint count, MSparse>=0 DenseA - array[MDense,N], constraint matrix; ignored if MDense=0. MDense - dense constraint count, MDense>=0 AB - lower bounds for constraints, always present and finite, array[MSparse+MDense] AR - ranges for constraints, can be zero (equality constraint), positive (range constraint) or +INF (lower bound constraint), array[MSparse+MDense] OUTPUT PARAMETERS: DenseA - replaced by scaled/shifted constraints, array[MDense,N] SparseA - replaced by scaled/shifted constraints, array[MSparse,N] AB - replaced by scaled/shifted lower bounds, array[MDense+MSparse] AR - replaced by scaled/shifted ranges, array[MDense+MSparse] -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void scaleshiftmixedbrlcinplace(/* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, sparsematrix* sparsea, ae_int_t msparse, /* Real */ ae_matrix* densea, ae_int_t mdense, /* Real */ ae_vector* ab, /* Real */ ae_vector* ar, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t k0; ae_int_t k1; double v; double vv; ae_assert(msparse==0||((sparsea->matrixtype==1&&sparsea->m==msparse)&&sparsea->n==n), "ScaleShiftMixedBRLCInplace: non-CRS sparse constraint matrix!", _state); for(i=0; i<=msparse-1; i++) { /* * Scale/shift constraint; shift its lower bound * * NOTE: range is NOT scaled or shifted */ v = 0.0; k0 = sparsea->ridx.ptr.p_int[i]; k1 = sparsea->ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { j = sparsea->idx.ptr.p_int[k]; vv = sparsea->vals.ptr.p_double[k]; v = v+vv*xorigin->ptr.p_double[j]; sparsea->vals.ptr.p_double[k] = vv*s->ptr.p_double[j]; } ab->ptr.p_double[i] = ab->ptr.p_double[i]-v; } for(i=0; i<=mdense-1; i++) { /* * Scale/shift constraint; shift its lower bound * * NOTE: range is NOT scaled or shifted */ v = 0.0; for(j=0; j<=n-1; j++) { vv = 
densea->ptr.pp_double[i][j]; v = v+vv*xorigin->ptr.p_double[j]; densea->ptr.pp_double[i][j] = vv*s->ptr.p_double[j]; } ab->ptr.p_double[msparse+i] = ab->ptr.p_double[msparse+i]-v; } } /************************************************************************* This function generates scaled (by S) reformulation of dense quadratic and linear terms in QP problem. INPUT PARAMETERS: N - number of variables. DenseA - array[NMain,NMain], quadratic term IsUpper - whether upper or lower triangle is present NMain - number of nonslack vars, 1<=NMain<=NTotal DenseB - array[NTotal], linear term NTotal - total number of variables, NTotal>=1 S - scale vector, array[NTotal]: * I-th element contains scale of I-th variable, * SC[I]>0 OUTPUT PARAMETERS: DenseA - replaced by scaled term, array[N,N] DenseB - replaced by scaled term, array[N] -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void scaledenseqpinplace(/* Real */ ae_matrix* densea, ae_bool isupper, ae_int_t nmain, /* Real */ ae_vector* denseb, ae_int_t ntotal, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; double si; for(i=0; i<=nmain-1; i++) { si = s->ptr.p_double[i]; if( isupper ) { j0 = i; j1 = nmain-1; } else { j0 = 0; j1 = i; } for(j=j0; j<=j1; j++) { densea->ptr.pp_double[i][j] = densea->ptr.pp_double[i][j]*si*s->ptr.p_double[j]; } } for(i=0; i<=ntotal-1; i++) { denseb->ptr.p_double[i] = denseb->ptr.p_double[i]*s->ptr.p_double[i]; } } /************************************************************************* This function generates scaled (by S) reformulation of sparse quadratic and linear terms in QP problem. INPUT PARAMETERS: S - scale vector, array[N]: * I-th element contains scale of I-th variable, * SC[I]>0 N - number of variables. SparseA - NxN SparseMatrix in CRS format (any triangle can be present, we will scale everything) DenseB - array[N], linear term OUTPUT PARAMETERS: SparseA - replaced by scaled term DenseB - replaced by scaled term -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void scalesparseqpinplace(/* Real */ ae_vector* s, ae_int_t n, sparsematrix* sparsea, /* Real */ ae_vector* denseb, ae_state *_state) { ae_int_t i; ae_int_t k0; ae_int_t k1; ae_int_t k; double si; ae_assert((sparsea->matrixtype==1&&sparsea->m==n)&&sparsea->n==n, "ScaleSparseQPInplace: SparseA in unexpected format", _state); for(i=0; i<=n-1; i++) { si = s->ptr.p_double[i]; k0 = sparsea->ridx.ptr.p_int[i]; k1 = sparsea->ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { sparsea->vals.ptr.p_double[k] = sparsea->vals.ptr.p_double[k]*si*s->ptr.p_double[sparsea->idx.ptr.p_int[k]]; } denseb->ptr.p_double[i] = denseb->ptr.p_double[i]*si; } } /************************************************************************* This function normalizes two-sided "lower-bound/range" constraints stored in dense format in such a way that L2 norms of rows (right hand side NOT included) become equal to 1.0. Exactly zero rows are handled correctly. INPUT PARAMETERS: DenseA - array[M,N], constraint matrix AB - lower bounds for constraints, always present and finite, array[M] AR - ranges for constraints, can be zero (equality constraint), positive (range constraint) or +INF (lower bound constraint), array[M] N - number of variables, N>=1. 
M - constraint count, M>=0 NeedNorms - whether we need row norms or not OUTPUT PARAMETERS: DenseA - replaced by normalized constraints, array[M,N] AB - replaced by normalized lower bounds, array[M] AR - replaced by normalized ranges, array[M] RowNorms - if NeedNorms is true, leading M elements (resized if length is less than M) are filled by row norms before normalization was performed. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void normalizedensebrlcinplace(/* Real */ ae_matrix* densea, /* Real */ ae_vector* ab, /* Real */ ae_vector* ar, ae_int_t n, ae_int_t m, /* Real */ ae_vector* rownorms, ae_bool neednorms, ae_state *_state) { ae_int_t i; ae_int_t j; double v; double vv; if( neednorms ) { rvectorsetlengthatleast(rownorms, m, _state); } for(i=0; i<=m-1; i++) { vv = 0.0; for(j=0; j<=n-1; j++) { v = densea->ptr.pp_double[i][j]; vv = vv+v*v; } vv = ae_sqrt(vv, _state); if( neednorms ) { rownorms->ptr.p_double[i] = vv; } if( ae_fp_greater(vv,(double)(0)) ) { vv = 1/vv; for(j=0; j<=n-1; j++) { densea->ptr.pp_double[i][j] = densea->ptr.pp_double[i][j]*vv; } ab->ptr.p_double[i] = ab->ptr.p_double[i]*vv; if( ae_isfinite(ar->ptr.p_double[i], _state) ) { ar->ptr.p_double[i] = ar->ptr.p_double[i]*vv; } } } } /************************************************************************* This function normalizes two-sided "lower-bound/range" constraints stored in dense format in such a way that L2 norms of rows (right hand side NOT included) become equal to 1.0. Exactly zero rows are handled correctly. INPUT PARAMETERS: SparseA - sparse MSparse*N constraint matrix in CRS format; ignored if MSparse=0. MSparse - dense constraint count, MSparse>=0 DenseA - array[MDense,N], constraint matrix; ignored if MDense=0. MDense - dense constraint count, MDense>=0 AB - lower bounds for constraints, always present and finite, array[MSparse+MDense] AR - ranges for constraints, can be zero (equality constraint), positive (range constraint) or +INF (lower bound constraint), array[MSparse+MDense] N - number of variables, N>=1. NeedNorms - whether we need row norms or not OUTPUT PARAMETERS: DenseA - replaced by normalized constraints, array[M,N] AB - replaced by normalized lower bounds, array[M] AR - replaced by normalized ranges, array[M] RowNorms - if NeedNorms is true, leading M elements (resized if length is less than M) are filled by row norms before normalization was performed. 
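EXAMPLE (illustrative numbers): a constraint row with coefficients [3,4], AB[i]=5 and AR[i]=10 has L2 norm 5; after normalization the row becomes [0.6,0.8], AB[i] becomes 1, AR[i] becomes 2, and RowNorms[i] (if requested) receives 5. An infinite AR[i] is left as +INF, and an exactly zero row is left unchanged with RowNorms[i]=0.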
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void normalizemixedbrlcinplace(sparsematrix* sparsea, ae_int_t msparse, /* Real */ ae_matrix* densea, ae_int_t mdense, /* Real */ ae_vector* ab, /* Real */ ae_vector* ar, ae_int_t n, /* Real */ ae_vector* rownorms, ae_bool neednorms, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t k0; ae_int_t k1; double v; double vv; ae_assert(msparse==0||((sparsea->matrixtype==1&&sparsea->m==msparse)&&sparsea->n==n), "ScaleShiftMixedBRLCInplace: non-CRS sparse constraint matrix!", _state); if( neednorms ) { rvectorsetlengthatleast(rownorms, mdense+msparse, _state); } for(i=0; i<=msparse-1; i++) { vv = 0.0; k0 = sparsea->ridx.ptr.p_int[i]; k1 = sparsea->ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { v = sparsea->vals.ptr.p_double[k]; vv = vv+v*v; } vv = ae_sqrt(vv, _state); if( neednorms ) { rownorms->ptr.p_double[i] = vv; } if( ae_fp_greater(vv,(double)(0)) ) { vv = 1/vv; for(k=k0; k<=k1; k++) { sparsea->vals.ptr.p_double[k] = sparsea->vals.ptr.p_double[k]*vv; } ab->ptr.p_double[i] = ab->ptr.p_double[i]*vv; if( ae_isfinite(ar->ptr.p_double[i], _state) ) { ar->ptr.p_double[i] = ar->ptr.p_double[i]*vv; } } } for(i=0; i<=mdense-1; i++) { vv = 0.0; for(j=0; j<=n-1; j++) { v = densea->ptr.pp_double[i][j]; vv = vv+v*v; } vv = ae_sqrt(vv, _state); if( neednorms ) { rownorms->ptr.p_double[msparse+i] = vv; } if( ae_fp_greater(vv,(double)(0)) ) { vv = 1/vv; for(j=0; j<=n-1; j++) { densea->ptr.pp_double[i][j] = densea->ptr.pp_double[i][j]*vv; } ab->ptr.p_double[msparse+i] = ab->ptr.p_double[msparse+i]*vv; if( ae_isfinite(ar->ptr.p_double[msparse+i], _state) ) { ar->ptr.p_double[msparse+i] = ar->ptr.p_double[msparse+i]*vv; } } } } /************************************************************************* This function normalizes dense QP problem in such a way that maximum over its linear/quadratic coefficients max(max(A),max(B)) becomes equal to 1.0. NOTE: completely zero A and B are handled correctly. INPUT PARAMETERS: DenseA - array[NMain,NMain], quadratic term IsUpper - whether upper or lower triangle is present NMain - number of nonslack vars, 1<=NMain<=NTotal DenseB - array[NTotal], linear term NTotal - total number of variables. 
OUTPUT PARAMETERS: DenseA - replaced by normalized term DenseB - replaced by normalized term RESULT: max(max(A),max(B)) is returned -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ double normalizedenseqpinplace(/* Real */ ae_matrix* densea, ae_bool isupper, ae_int_t nmain, /* Real */ ae_vector* denseb, ae_int_t ntotal, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; double mx; double v; double result; mx = (double)(0); for(i=0; i<=nmain-1; i++) { if( isupper ) { j0 = i; j1 = nmain-1; } else { j0 = 0; j1 = i; } for(j=j0; j<=j1; j++) { mx = ae_maxreal(mx, ae_fabs(densea->ptr.pp_double[i][j], _state), _state); } } for(i=0; i<=ntotal-1; i++) { mx = ae_maxreal(mx, ae_fabs(denseb->ptr.p_double[i], _state), _state); } result = mx; if( ae_fp_eq(mx,(double)(0)) ) { return result; } v = 1/mx; for(i=0; i<=nmain-1; i++) { if( isupper ) { j0 = i; j1 = nmain-1; } else { j0 = 0; j1 = i; } for(j=j0; j<=j1; j++) { densea->ptr.pp_double[i][j] = densea->ptr.pp_double[i][j]*v; } } for(i=0; i<=ntotal-1; i++) { denseb->ptr.p_double[i] = denseb->ptr.p_double[i]*v; } return result; } /************************************************************************* This function normalizes sparse QP problem in such a way that maximum over its linear/quadratic coefficients max(max(A),max(B)) becomes equal to 1.0. NOTE: completely zero A and B are handled correctly. INPUT PARAMETERS: SparseA - Sparse NxN matrix, either upper or lower triangle, diagonal MUST be present IsUpper - which triangle is present (other one is ignored) DenseB - array[N], linear term N - number of variables. OUTPUT PARAMETERS: DenseA - replaced by normalized term, array[N,N] DenseB - replaced by normalized term, array[N] RESULT: max(max(A),max(B)) is returned -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ double normalizesparseqpinplace(sparsematrix* sparsea, ae_bool isupper, /* Real */ ae_vector* denseb, ae_int_t n, ae_state *_state) { ae_int_t i; ae_int_t k; ae_int_t k0; ae_int_t k1; double mx; double v; double result; ae_assert((sparsea->matrixtype==1&&sparsea->m==n)&&sparsea->n==n, "ScaleSparseQPInplace: SparseA in unexpected format", _state); mx = (double)(0); for(i=0; i<=n-1; i++) { ae_assert(sparsea->didx.ptr.p_int[i]+1==sparsea->uidx.ptr.p_int[i], "NormalizeSparseQPInplace: critical integrity check failed, sparse diagonal not found", _state); if( isupper ) { k0 = sparsea->didx.ptr.p_int[i]; k1 = sparsea->ridx.ptr.p_int[i+1]-1; } else { k0 = sparsea->ridx.ptr.p_int[i]; k1 = sparsea->didx.ptr.p_int[i]; } for(k=k0; k<=k1; k++) { mx = ae_maxreal(mx, ae_fabs(sparsea->vals.ptr.p_double[k], _state), _state); } mx = ae_maxreal(mx, ae_fabs(denseb->ptr.p_double[i], _state), _state); } result = mx; if( ae_fp_eq(mx,(double)(0)) ) { return result; } v = 1/mx; for(i=0; i<=n-1; i++) { k0 = sparsea->ridx.ptr.p_int[i]; k1 = sparsea->ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { sparsea->vals.ptr.p_double[k] = sparsea->vals.ptr.p_double[k]*v; } denseb->ptr.p_double[i] = denseb->ptr.p_double[i]*v; } return result; } /************************************************************************* This function performs transformation of X from scaled/shifted coordinates to unscaled/unshifted ones, paying special attention to box constraints: * points which were exactly at the boundary before scaling will be mapped to corresponding boundary after scaling * in any case, unscaled box 
constraints will be satisfied -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ void unscaleunshiftpointbc(/* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, /* Real */ ae_vector* rawbndl, /* Real */ ae_vector* rawbndu, /* Real */ ae_vector* sclsftbndl, /* Real */ ae_vector* sclsftbndu, /* Boolean */ ae_vector* hasbndl, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* x, ae_int_t n, ae_state *_state) { ae_int_t i; for(i=0; i<=n-1; i++) { if( hasbndl->ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],sclsftbndl->ptr.p_double[i]) ) { x->ptr.p_double[i] = rawbndl->ptr.p_double[i]; continue; } if( hasbndu->ptr.p_bool[i]&&ae_fp_greater_eq(x->ptr.p_double[i],sclsftbndu->ptr.p_double[i]) ) { x->ptr.p_double[i] = rawbndu->ptr.p_double[i]; continue; } x->ptr.p_double[i] = x->ptr.p_double[i]*s->ptr.p_double[i]+xorigin->ptr.p_double[i]; if( hasbndl->ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],rawbndl->ptr.p_double[i]) ) { x->ptr.p_double[i] = rawbndl->ptr.p_double[i]; } if( hasbndu->ptr.p_bool[i]&&ae_fp_greater_eq(x->ptr.p_double[i],rawbndu->ptr.p_double[i]) ) { x->ptr.p_double[i] = rawbndu->ptr.p_double[i]; } } } #endif #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Initializes QP-IPM state and prepares it to receive quadratic/linear terms and constraints. The solver is configured to work internally with dense NxN factorization, no matter what exactly is passed - dense or sparse matrices. INPUT PARAMETERS: State - solver state to be configured; previously allocated memory is reused as much as possible S - scale vector, array[N]: * I-th element contains scale of I-th variable, * S[I]>0 XOrigin - origin term, array[N]. Can be zero. The solver solves problem of the form > > min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) > The terms A and b (as well as constraints) will be specified later with separate calls. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipminitdense(vipmstate* state, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, ae_state *_state) { ae_assert(n>=1, "VIPMInitDense: N<1", _state); ae_assert(isfinitevector(s, n, _state), "VIPMInitDense: S contains infinite or NaN elements", _state); ae_assert(isfinitevector(xorigin, n, _state), "VIPMInitDense: XOrigin contains infinite or NaN elements", _state); vipmsolver_vipminit(state, s, xorigin, n, n, 0, _state); } /************************************************************************* Initializes QP-IPM state and prepares it to receive quadratic/linear terms and constraints. The solver is configured to work internally with dense NxN problem divided into two distinct parts - "main" and slack one: * dense quadratic term is a NMain*NMain matrix (NMain<=N), quadratic coefficients are zero for variables outside of [0,NMain) range) * linear term is general vector of length N * linear constraints have special structure for variable with indexes in [NMain,N) range: at most one element per column can be nonzero. This mode is intended for problems arising during SL1QP nonlinear programming. INPUT PARAMETERS: State - solver state to be configured; previously allocated memory is reused as much as possible S - scale vector, array[N]: * I-th element contains scale of I-th variable, * S[I]>0 XOrigin - origin term, array[N]. Can be zero. 
The solver solves problem of the form > > min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) > The terms A and b (as well as constraints) will be specified later with separate calls. NMain - number of "main" variables, 1<=NMain<=N N - total number of variables including slack ones -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipminitdensewithslacks(vipmstate* state, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t nmain, ae_int_t n, ae_state *_state) { ae_assert(nmain>=1, "VIPMInitDense: NMain<1", _state); ae_assert(n>=1, "VIPMInitDense: N<1", _state); ae_assert(nmain<=n, "VIPMInitDense: NMain>N", _state); ae_assert(isfinitevector(s, n, _state), "VIPMInitDense: S contains infinite or NaN elements", _state); ae_assert(isfinitevector(xorigin, n, _state), "VIPMInitDense: XOrigin contains infinite or NaN elements", _state); vipmsolver_vipminit(state, s, xorigin, n, nmain, 0, _state); } /************************************************************************* Initializes QP-IPM state and prepares it to receive quadratic/linear terms and constraints. The solver is configured to work internally with sparse (N+M)x(N+M) factorization no matter what exactly is passed - dense or sparse matrices. Dense quadratic term will be sparsified prior to storage. INPUT PARAMETERS: State - solver state to be configured; previously allocated memory is reused as much as possible S - scale vector, array[N]: * I-th element contains scale of I-th variable, * S[I]>0 XOrigin - origin term, array[N]. Can be zero. The solver solves problem of the form > > min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) > The terms A and b (as well as constraints) will be specified later with separate calls. N - total number of variables, N>=1 This optimization mode assumes that no slack variables is present. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipminitsparse(vipmstate* state, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, ae_state *_state) { ae_assert(n>=1, "VIPMInitSparse: N<1", _state); ae_assert(isfinitevector(s, n, _state), "VIPMInitSparse: S contains infinite or NaN elements", _state); ae_assert(isfinitevector(xorigin, n, _state), "VIPMInitSparse: XOrigin contains infinite or NaN elements", _state); vipmsolver_vipminit(state, s, xorigin, n, n, 1, _state); } /************************************************************************* Sets linear/quadratic terms for QP-IPM solver If you initialized solver with VIMPInitDenseWithSlacks(), NMain below is a number of non-slack variables. In other cases, NMain=N. 
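NOTE: the quadratic term is stored in the representation dictated by the factorization type chosen at initialization - dense lower triangle for the dense NxN factorization, CRS with an explicitly stored diagonal for the sparse (N+M)x(N+M) one. Both H and C are then scaled by S and jointly normalized so that the largest coefficient magnitude becomes 1 (identically zero problems are left unchanged); the normalization factor is saved in TargetScale and is later used to unscale Lagrange multipliers.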
INPUT PARAMETERS: State - instance initialized with one of the initialization functions DenseH - if HKind=0: array[NMain,NMain], dense quadratic term (either upper or lower triangle) SparseH - if HKind=1: array[NMain,NMain], sparse quadratic term (either upper or lower triangle) HKind - 0 or 1, quadratic term format IsUpper - whether dense/sparse H contains lower or upper triangle of the quadratic term C - array[N], linear term -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipmsetquadraticlinear(vipmstate* state, /* Real */ ae_matrix* denseh, sparsematrix* sparseh, ae_int_t hkind, ae_bool isupper, /* Real */ ae_vector* c, ae_state *_state) { ae_int_t nmain; ae_int_t n; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t j0; ae_int_t j1; double v; double vv; ae_int_t nnz; ae_int_t offs; nmain = state->nmain; n = state->n; ae_assert(hkind==0||hkind==1, "VIPMSetQuadraticLinear: incorrect HKind", _state); ae_assert(isfinitevector(c, n, _state), "VIPMSetQuadraticLinear: C contains infinite or NaN elements", _state); ae_assert(state->factorizationtype==0||state->factorizationtype==1, "VIPMSetQuadraticLinear: unexpected factorization type", _state); /* * Set problem info, reset factorization flag */ state->islinear = ae_false; state->factorizationpresent = ae_false; state->factorizationpoweredup = ae_false; /* * Linear term */ rvectorsetlengthatleast(&state->c, n, _state); rvectorcopy(n, c, 0, &state->c, 0, _state); /* * Quadratic term and normalization * * NOTE: we perform integrity check for inifinities/NANs by * computing sum of all matrix elements and checking its * value for being finite. It is a bit faster than checking * each element individually. */ state->hkind = -1; state->targetscale = 1.0; if( state->factorizationtype==0 ) { /* * Quadratic term is stored in dense format: either copy dense * term of densify sparse one */ state->hkind = 0; rmatrixsetlengthatleast(&state->denseh, nmain, nmain, _state); if( hkind==0 ) { /* * Copy dense quadratic term */ if( isupper ) { rmatrixtranspose(nmain, nmain, denseh, 0, 0, &state->denseh, 0, 0, _state); } else { rmatrixcopy(nmain, nmain, denseh, 0, 0, &state->denseh, 0, 0, _state); } } if( hkind==1 ) { /* * Extract sparse quadratic term */ ae_assert(sparseh->matrixtype==1, "VIPMSetQuadraticLinear: unexpected sparse matrix format", _state); ae_assert(sparseh->m==nmain, "VIPMSetQuadraticLinear: unexpected sparse matrix size", _state); ae_assert(sparseh->n==nmain, "VIPMSetQuadraticLinear: unexpected sparse matrix size", _state); for(i=0; i<=nmain-1; i++) { for(j=0; j<=i; j++) { state->denseh.ptr.pp_double[i][j] = (double)(0); } } for(i=0; i<=nmain-1; i++) { /* * diagonal element */ if( sparseh->didx.ptr.p_int[i]!=sparseh->uidx.ptr.p_int[i] ) { state->denseh.ptr.pp_double[i][i] = sparseh->vals.ptr.p_double[sparseh->didx.ptr.p_int[i]]; } /* * Off-diagonal elements */ if( isupper ) { /* * superdiagonal elements are moved to subdiagonal part */ j0 = sparseh->uidx.ptr.p_int[i]; j1 = sparseh->ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->denseh.ptr.pp_double[sparseh->idx.ptr.p_int[j]][i] = sparseh->vals.ptr.p_double[j]; } } else { /* * subdiagonal elements are moved to subdiagonal part */ j0 = sparseh->ridx.ptr.p_int[i]; j1 = sparseh->didx.ptr.p_int[i]-1; for(j=j0; j<=j1; j++) { state->denseh.ptr.pp_double[i][sparseh->idx.ptr.p_int[j]] = sparseh->vals.ptr.p_double[j]; } } } } vv = (double)(0); for(i=0; i<=nmain-1; i++) { for(j=0; j<=i; j++) { vv = 
vv+state->denseh.ptr.pp_double[i][j]; } } ae_assert(ae_isfinite(vv, _state), "VIPMSetQuadraticLinear: DenseH contains infinite or NaN values!", _state); scaledenseqpinplace(&state->denseh, ae_false, nmain, &state->c, n, &state->scl, _state); state->targetscale = normalizedenseqpinplace(&state->denseh, ae_false, nmain, &state->c, n, _state); } if( state->factorizationtype==1 ) { ae_assert(nmain==n, "VIPMSetQuadraticLinear: critical integrity check failed, NMain!=N", _state); /* * Quadratic term is stored in sparse format: either sparsify dense * term of copy sparse one */ state->hkind = 1; state->sparseh.matrixtype = 1; state->sparseh.m = n; state->sparseh.n = n; if( hkind==0 ) { /* * Sparsify dense term */ nnz = 0; for(i=0; i<=n-1; i++) { nnz = nnz+1; if( isupper ) { j0 = i+1; j1 = n-1; } else { j0 = 0; j1 = i-1; } for(j=j0; j<=j1; j++) { if( denseh->ptr.pp_double[i][j]!=0 ) { nnz = nnz+1; } } } ivectorsetlengthatleast(&state->sparseh.ridx, n+1, _state); ivectorsetlengthatleast(&state->sparseh.idx, nnz, _state); rvectorsetlengthatleast(&state->sparseh.vals, nnz, _state); state->sparseh.ridx.ptr.p_int[0] = 0; offs = 0; vv = (double)(0); for(i=0; i<=n-1; i++) { /* * Off-diagonal elements are copied only when nonzero */ if( !isupper ) { for(j=0; j<=i-1; j++) { if( denseh->ptr.pp_double[i][j]!=0 ) { v = denseh->ptr.pp_double[i][j]; state->sparseh.idx.ptr.p_int[offs] = j; state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; } } } /* * Diagonal element is always copied */ v = denseh->ptr.pp_double[i][i]; state->sparseh.idx.ptr.p_int[offs] = i; state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; /* * Off-diagonal elements are copied only when nonzero */ if( isupper ) { for(j=i+1; j<=n-1; j++) { if( denseh->ptr.pp_double[i][j]!=0 ) { v = denseh->ptr.pp_double[i][j]; state->sparseh.idx.ptr.p_int[offs] = j; state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; } } } /* * Finalize row */ state->sparseh.ridx.ptr.p_int[i+1] = offs; } ae_assert(ae_isfinite(vv, _state), "VIPMSetQuadraticLinear: DenseH contains infinite or NaN values!", _state); ae_assert(offs==nnz, "VIPMSetQuadraticLinear: integrity check failed", _state); sparsecreatecrsinplace(&state->sparseh, _state); } if( hkind==1 ) { /* * Copy sparse quadratic term, but make sure that we have diagonal elements * present (we add diagonal if it is not present) */ ae_assert(sparseh->matrixtype==1, "VIPMSetQuadraticLinear: unexpected sparse matrix format", _state); ae_assert(sparseh->m==n, "VIPMSetQuadraticLinear: unexpected sparse matrix size", _state); ae_assert(sparseh->n==n, "VIPMSetQuadraticLinear: unexpected sparse matrix size", _state); ivectorsetlengthatleast(&state->sparseh.ridx, n+1, _state); ivectorsetlengthatleast(&state->sparseh.idx, sparseh->ridx.ptr.p_int[n]+n, _state); rvectorsetlengthatleast(&state->sparseh.vals, sparseh->ridx.ptr.p_int[n]+n, _state); state->sparseh.ridx.ptr.p_int[0] = 0; offs = 0; vv = (double)(0); for(i=0; i<=n-1; i++) { /* * Copy subdiagonal elements (if needed) */ if( !isupper ) { j0 = sparseh->ridx.ptr.p_int[i]; j1 = sparseh->didx.ptr.p_int[i]-1; for(k=j0; k<=j1; k++) { v = sparseh->vals.ptr.p_double[k]; state->sparseh.idx.ptr.p_int[offs] = sparseh->idx.ptr.p_int[k]; state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; } } /* * Diagonal element is always copied */ v = (double)(0); if( sparseh->uidx.ptr.p_int[i]!=sparseh->didx.ptr.p_int[i] ) { v = sparseh->vals.ptr.p_double[sparseh->didx.ptr.p_int[i]]; } state->sparseh.idx.ptr.p_int[offs] = i; 
state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; /* * Copy superdiagonal elements (if needed) */ if( isupper ) { j0 = sparseh->uidx.ptr.p_int[i]; j1 = sparseh->ridx.ptr.p_int[i+1]-1; for(k=j0; k<=j1; k++) { v = sparseh->vals.ptr.p_double[k]; state->sparseh.idx.ptr.p_int[offs] = sparseh->idx.ptr.p_int[k]; state->sparseh.vals.ptr.p_double[offs] = v; vv = vv+v; offs = offs+1; } } /* * Finalize row */ state->sparseh.ridx.ptr.p_int[i+1] = offs; } ae_assert(ae_isfinite(vv, _state), "VIPMSetQuadraticLinear: SparseH contains infinite or NaN values!", _state); ae_assert(offs<=state->sparseh.vals.cnt&&offs<=state->sparseh.idx.cnt, "VIPMSetQuadraticLinear: integrity check failed", _state); sparsecreatecrsinplace(&state->sparseh, _state); if( isupper ) { sparsecopytransposecrsbuf(&state->sparseh, &state->tmpsparse0, _state); sparsecopybuf(&state->tmpsparse0, &state->sparseh, _state); } } scalesparseqpinplace(&state->scl, n, &state->sparseh, &state->c, _state); state->targetscale = normalizesparseqpinplace(&state->sparseh, ae_false, &state->c, n, _state); } ae_assert(state->hkind>=0, "VIPMSetQuadraticLinear: integrity check failed", _state); } /************************************************************************* Sets constraints for QP-IPM solver INPUT PARAMETERS: State - instance initialized with one of the initialization functions BndL, BndU - lower and upper bound. BndL[] can be -INF, BndU[] can be +INF. SparseA - sparse constraint matrix, CRS format MSparse - number of sparse constraints DenseA - array[MDense,N], dense part of the constraints MDense - number of dense constraints CL, CU - lower and upper bounds for constraints, first MSparse are bounds for sparse part, following MDense ones are bounds for dense part, MSparse+MDense in total. -INF <= CL[I] <= CU[I] <= +INF. 
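NOTE: internally every two-sided constraint CL[I] <= a*x <= CU[I] is converted to the lower-bound/range form B[I] <= a*x <= B[I]+R[I]: when CL[I] is finite, B[I]=CL[I] and R[I]=CU[I]-CL[I] (R[I]=+INF for a pure lower bound); when only CU[I] is finite, the row is negated and B[I]=-CU[I], R[I]=+INF. Rows are then scaled/shifted, normalized to unit L2 norm, and the per-row factors are accumulated in AScales so that Lagrange multipliers can be reported in the original units.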
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipmsetconstraints(vipmstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, sparsematrix* sparsea, ae_int_t msparse, /* Real */ ae_matrix* densea, ae_int_t mdense, /* Real */ ae_vector* cl, /* Real */ ae_vector* cu, ae_state *_state) { ae_int_t m; ae_int_t n; ae_int_t nmain; ae_int_t nslack; ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; ae_int_t k; ae_int_t offsmain; ae_int_t offscombined; double vs; double v; n = state->n; nmain = state->nmain; nslack = n-nmain; ae_assert(mdense>=0, "VIPMSetConstraints: MDense<0", _state); ae_assert(msparse>=0, "VIPMSetConstraints: MSparse<0", _state); ae_assert(apservisfinitematrix(densea, mdense, n, _state), "VIPMSetConstraints: DenseA contains infinite or NaN values!", _state); ae_assert(msparse==0||sparsea->matrixtype==1, "VIPMSetConstraints: non-CRS constraint matrix!", _state); ae_assert(msparse==0||(sparsea->m==msparse&&sparsea->n==n), "VIPMSetConstraints: constraint matrix has incorrect size", _state); ae_assert(cl->cnt>=mdense+msparse, "VIPMSetConstraints: CL is too short!", _state); ae_assert(cu->cnt>=mdense+msparse, "VIPMSetConstraints: CU is too short!", _state); /* * Reset factorization flag */ state->factorizationpresent = ae_false; state->factorizationpoweredup = ae_false; /* * Box constraints */ rvectorsetlengthatleast(&state->bndl, n, _state); rvectorsetlengthatleast(&state->bndu, n, _state); rvectorsetlengthatleast(&state->rawbndl, n, _state); rvectorsetlengthatleast(&state->rawbndu, n, _state); bvectorsetlengthatleast(&state->hasbndl, n, _state); bvectorsetlengthatleast(&state->hasbndu, n, _state); for(i=0; i<=n-1; i++) { state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->rawbndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->rawbndu.ptr.p_double[i] = bndu->ptr.p_double[i]; } scaleshiftbcinplace(&state->scl, &state->xorigin, &state->bndl, &state->bndu, n, _state); /* * Linear constraints (full matrices) */ m = mdense+msparse; rvectorsetlengthatleast(&state->b, m, _state); rvectorsetlengthatleast(&state->r, m, _state); rvectorsetlengthatleast(&state->ascales, m, _state); bvectorsetlengthatleast(&state->hasr, m, _state); rmatrixsetlengthatleast(&state->denseafull, mdense, n, _state); if( msparse>0 ) { sparsecopytocrsbuf(sparsea, &state->sparseafull, _state); } if( mdense>0 ) { rmatrixcopy(mdense, n, densea, 0, 0, &state->denseafull, 0, 0, _state); } for(i=0; i<=m-1; i++) { ae_assert(ae_isfinite(cl->ptr.p_double[i], _state)||ae_isneginf(cl->ptr.p_double[i], _state), "VIPMInitDenseQuadratic: CL is not finite number or -INF", _state); ae_assert(ae_isfinite(cu->ptr.p_double[i], _state)||ae_isposinf(cu->ptr.p_double[i], _state), "VIPMInitDenseQuadratic: CU is not finite number or +INF", _state); ae_assert(ae_isfinite(cl->ptr.p_double[i], _state)||ae_isfinite(cu->ptr.p_double[i], _state), "VIPMInitDenseQuadratic: linear constraint with CL=-INF, CU=+INF; degenerate problem formulation", _state); /* * Store range */ if( ae_isfinite(cl->ptr.p_double[i], _state) ) { ae_assert(!ae_isfinite(cu->ptr.p_double[i], _state)||ae_fp_greater_eq(cu->ptr.p_double[i],cl->ptr.p_double[i]), "VIPMInitDenseQuadratic: inconsistent range (right-hand side) for linear constraint", _state); 
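/* * Lower bound CL[i] is finite: store B=CL, R=CU-CL (R=+INF when CU is infinite) and keep the row sign (VS=+1); the ELSE branch handles the CU-only case by negating the row (VS=-1) and storing B=-CU, R=+INF */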
state->b.ptr.p_double[i] = cl->ptr.p_double[i]; state->r.ptr.p_double[i] = cu->ptr.p_double[i]-cl->ptr.p_double[i]; state->hasr.ptr.p_bool[i] = ae_isfinite(cu->ptr.p_double[i], _state); vs = (double)(1); } else { state->b.ptr.p_double[i] = -cu->ptr.p_double[i]; state->r.ptr.p_double[i] = _state->v_posinf; state->hasr.ptr.p_bool[i] = ae_false; vs = (double)(-1); } /* * Store matrix row and its scaling coefficient */ if( i<msparse ) { j0 = state->sparseafull.ridx.ptr.p_int[i]; j1 = state->sparseafull.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->sparseafull.vals.ptr.p_double[j] = state->sparseafull.vals.ptr.p_double[j]*vs; } } else { for(j=0; j<=n-1; j++) { state->denseafull.ptr.pp_double[i-msparse][j] = state->denseafull.ptr.pp_double[i-msparse][j]*vs; } } state->ascales.ptr.p_double[i] = vs; } scaleshiftmixedbrlcinplace(&state->scl, &state->xorigin, n, &state->sparseafull, msparse, &state->denseafull, mdense, &state->b, &state->r, _state); normalizemixedbrlcinplace(&state->sparseafull, msparse, &state->denseafull, mdense, &state->b, &state->r, n, &state->tmp0, ae_true, _state); for(i=0; i<=m-1; i++) { state->ascales.ptr.p_double[i] = state->ascales.ptr.p_double[i]*state->tmp0.ptr.p_double[i]; } state->mdense = mdense; state->msparse = msparse; /* * Separate main and slack parts of the constraint matrices */ ivectorsetlengthatleast(&state->tmpi, nslack, _state); for(i=0; i<=nslack-1; i++) { state->tmpi.ptr.p_int[i] = 0; } state->combinedaslack.m = mdense+msparse; state->combinedaslack.n = nslack; ivectorsetlengthatleast(&state->combinedaslack.ridx, mdense+msparse+1, _state); ivectorsetlengthatleast(&state->combinedaslack.idx, nslack, _state); rvectorsetlengthatleast(&state->combinedaslack.vals, nslack, _state); state->combinedaslack.ridx.ptr.p_int[0] = 0; state->sparseamain.m = msparse; state->sparseamain.n = nmain; if( msparse>0 ) { ivectorsetlengthatleast(&state->sparseamain.ridx, msparse+1, _state); ivectorsetlengthatleast(&state->sparseamain.idx, sparsea->ridx.ptr.p_int[msparse], _state); rvectorsetlengthatleast(&state->sparseamain.vals, sparsea->ridx.ptr.p_int[msparse], _state); state->sparseamain.ridx.ptr.p_int[0] = 0; for(i=0; i<=msparse-1; i++) { offsmain = state->sparseamain.ridx.ptr.p_int[i]; offscombined = state->combinedaslack.ridx.ptr.p_int[i]; j0 = state->sparseafull.ridx.ptr.p_int[i]; j1 = state->sparseafull.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { v = state->sparseafull.vals.ptr.p_double[j]; k = state->sparseafull.idx.ptr.p_int[j]; if( k<nmain ) { state->sparseamain.idx.ptr.p_int[offsmain] = k; state->sparseamain.vals.ptr.p_double[offsmain] = v; offsmain = offsmain+1; } else { ae_assert(state->tmpi.ptr.p_int[k-nmain]==0, "VIPMSetConstraints: slack column contains more than one nonzero element", _state); state->combinedaslack.idx.ptr.p_int[offscombined] = k-nmain; state->combinedaslack.vals.ptr.p_double[offscombined] = v; state->tmpi.ptr.p_int[k-nmain] = state->tmpi.ptr.p_int[k-nmain]+1; offscombined = offscombined+1; } } state->sparseamain.ridx.ptr.p_int[i+1] = offsmain; state->combinedaslack.ridx.ptr.p_int[i+1] = offscombined; } } sparsecreatecrsinplace(&state->sparseamain, _state); if( mdense>0 ) { rmatrixsetlengthatleast(&state->denseamain, mdense, nmain, _state); rmatrixcopy(mdense, nmain, &state->denseafull, 0, 0, &state->denseamain, 0, 0, _state); for(i=0; i<=mdense-1; i++) { offscombined = state->combinedaslack.ridx.ptr.p_int[msparse+i]; for(k=nmain; k<=n-1; k++) { if( state->denseafull.ptr.pp_double[i][k]!=0 ) { ae_assert(state->tmpi.ptr.p_int[k-nmain]==0, "VIPMSetConstraints: slack column 
contains more than one nonzero element", _state); state->combinedaslack.idx.ptr.p_int[offscombined] = k-nmain; state->combinedaslack.vals.ptr.p_double[offscombined] = state->denseafull.ptr.pp_double[i][k]; state->tmpi.ptr.p_int[k-nmain] = state->tmpi.ptr.p_int[k-nmain]+1; offscombined = offscombined+1; } } state->combinedaslack.ridx.ptr.p_int[msparse+i+1] = offscombined; } } sparsecreatecrsinplace(&state->combinedaslack, _state); } /************************************************************************* Sets stopping criteria for QP-IPM solver. You can set all epsilon-values to one small value, roughly 1.0E-6 or 1.0E-8. INPUT PARAMETERS: State - instance initialized with one of the initialization functions EpsP - maximum primal error allowed in the solution, EpsP>=0. Zero will be automatically replaced by recommended default value, which is equal to sqrt(machineEpsilon) in the current version EpsD - maximum dual error allowed in the solution, EpsD>=0. Zero will be automatically replaced by recommended default value, which is equal to sqrt(machineEpsilon) in the current version EpsGap - maximum duality gap allowed in the solution, EpsGap>=0. Zero will be automatically replaced by recommended default value, which is equal to sqrt(machineEpsilon) in the current version -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipmsetcond(vipmstate* state, double epsp, double epsd, double epsgap, ae_state *_state) { ae_assert(ae_isfinite(epsp, _state)&&ae_fp_greater_eq(epsp,(double)(0)), "VIPMSetCond: EpsP is infinite or negative", _state); ae_assert(ae_isfinite(epsd, _state)&&ae_fp_greater_eq(epsd,(double)(0)), "VIPMSetCond: EpsD is infinite or negative", _state); ae_assert(ae_isfinite(epsgap, _state)&&ae_fp_greater_eq(epsgap,(double)(0)), "VIPMSetCond: EpsGap is infinite or negative", _state); state->epsp = coalesce(epsp, ae_sqrt(ae_machineepsilon, _state), _state); state->epsd = coalesce(epsd, ae_sqrt(ae_machineepsilon, _state), _state); state->epsgap = coalesce(epsgap, ae_sqrt(ae_machineepsilon, _state), _state); } /************************************************************************* -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void vipmoptimize(vipmstate* state, /* Real */ ae_vector* xs, /* Real */ ae_vector* lagbc, /* Real */ ae_vector* laglc, ae_int_t* terminationtype, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; ae_bool dotrace; ae_bool dodetailedtrace; ae_int_t iteridx; double mu; double muaff; double sigma; double alphaaffp; double alphaaffd; double alphap; double alphad; ae_int_t primalstagnationcnt; ae_int_t dualstagnationcnt; double preverrp2; double preverrd2; double eprimal; double edual; double egap; double bnrminf; double cnrminf; double mumin; double mustop; *terminationtype = 0; n = state->n; m = state->mdense+state->msparse; dotrace = ae_is_trace_enabled("IPM"); dodetailedtrace = dotrace&&ae_is_trace_enabled("IPM.DETAILED"); /* * Allocate output */ rvectorsetlengthatleast(xs, n, _state); rvectorsetlengthatleast(lagbc, n, _state); rvectorsetlengthatleast(laglc, m, _state); for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = 0.0; lagbc->ptr.p_double[i] = 0.0; } for(i=0; i<=m-1; i++) { laglc->ptr.p_double[i] = 0.0; } /* * Some integrity checks: * * we need PrimalStagnationLen<DualStagnationLen in order to be able to detect primal stagnation (and stop at the best point found so far) before a stagnated dual error is interpreted as unboundedness */ ae_assert(vipmsolver_primalstagnationlen<vipmsolver_dualstagnationlen, "VIPMOptimize: critical integrity check failed, PrimalStagnationLen must be smaller than DualStagnationLen", _state); if( dotrace ) { ae_trace("> initial point was generated\n"); } vipmsolver_vipmpowerup(state, _state); vipmsolver_vipminitialpoint(state, _state); 
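/* * The main loop below implements a Mehrotra-style predictor-corrector scheme: the KKT system is factorized once per iteration; an affine-scaling (predictor) direction with zero centering yields MuAff and the step lengths AlphaAffP/AlphaAffD; the centering parameter is chosen as Sigma=((MuAff+MuMin)/(Mu+MuMin))^3; the corrector direction targets complementarity Sigma*Mu+MuMin and a damped primal/dual step is taken. Termination: 1 when the EpsP/EpsD/EpsGap criteria are met, 7 on primal stagnation or overly stringent tolerances, -4 on dual stagnation (unboundedness), -3 when |Y| exceeds BigY (infeasibility). */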
vipmsolver_vipmevaluateprogress(state, dotrace, dodetailedtrace, 0.0, 0.0, 0.0, 0.0, 0.0, _state); /* * Start iteration */ mustop = ae_machineepsilon; mumin = 0.01*mustop; bnrminf = vipmsolver_nrminf(&state->b, m, _state); cnrminf = vipmsolver_nrminf(&state->c, n, _state); primalstagnationcnt = 0; dualstagnationcnt = 0; *terminationtype = 7; for(iteridx=0; iteridx<=vipmsolver_maxipmits-1; iteridx++) { /* * Precompute factorization */ vipmsolver_vipmprecomputenewtonfactorization(state, &state->current, _state); /* * Compute Mu */ mu = vipmsolver_varscomputemu(&state->current, _state); /* * Compute affine scaling step for Mehrotra's predictor-corrector algorithm */ vipmsolver_varsinitbyzero(&state->deltaaff, n, m, _state); vipmsolver_vipmcomputestepdirection(state, &state->current, 0.0, &state->deltaaff, _state); vipmsolver_vipmcomputesteplength(state, &state->current, &state->deltaaff, vipmsolver_affinesteplengthdecay, &alphaaffp, &alphaaffd, _state); /* * Compute MuAff and centering parameter */ vipmsolver_varsinitfrom(&state->trial, &state->current, _state); vipmsolver_varsaddstep(&state->trial, &state->deltaaff, alphaaffp, alphaaffd, _state); muaff = vipmsolver_varscomputemu(&state->trial, _state); sigma = ae_pow((muaff+mumin)/(mu+mumin), (double)(3), _state); ae_assert(ae_fp_less_eq(sigma,(double)(1)), "VIPMOptimize: critical integrity check failed, Sigma>1", _state); /* * Compute corrector step */ vipmsolver_varsinitfrom(&state->deltacorr, &state->deltaaff, _state); vipmsolver_vipmcomputestepdirection(state, &state->current, sigma*mu+mumin, &state->deltacorr, _state); vipmsolver_vipmcomputesteplength(state, &state->current, &state->deltacorr, vipmsolver_steplengthdecay, &alphap, &alphad, _state); /* * Perform a step */ vipmsolver_varsaddstep(&state->current, &state->deltacorr, alphap, alphad, _state); inc(&state->repiterationscount, _state); /* * Evaluate progress so far */ preverrp2 = state->errp2; preverrd2 = state->errd2; if( dotrace ) { ae_trace("=== A PREDICTOR-CORRECTOR STEP %2d WAS PERFORMED ====================================================\n", (int)(iteridx)); } vipmsolver_vipmevaluateprogress(state, dotrace, dodetailedtrace, mu, muaff, sigma, alphap, alphad, _state); /* * Check stopping criteria */ if( (ae_fp_greater_eq(state->errp2,vipmsolver_stagnationdelta*preverrp2)&&ae_fp_greater_eq(state->errpinf,vipmsolver_primalinfeasible1))&&iteridx>=vipmsolver_minitersbeforestagnation ) { inc(&primalstagnationcnt, _state); } else { primalstagnationcnt = 0; } if( (ae_fp_greater_eq(state->errd2,vipmsolver_stagnationdelta*preverrd2)&&ae_fp_greater_eq(state->errdinf,vipmsolver_dualinfeasible1))&&iteridx>=vipmsolver_minitersbeforestagnation ) { inc(&dualstagnationcnt, _state); } else { dualstagnationcnt = 0; } mu = vipmsolver_varscomputemu(&state->current, _state); egap = vipmsolver_varscomputecomplementaritygap(&state->current, _state)/(1.0+ae_fabs(vipmsolver_vipmtarget(state, &state->current.x, _state), _state)); eprimal = state->errpinf/(1.0+bnrminf); edual = state->errdinf/(1.0+cnrminf); if( ae_fp_less_eq(mu,mustop)&&iteridx>=vipmsolver_itersfortoostringentcond ) { if( dotrace ) { ae_trace("> stopping conditions are too stringent, stopping at best point found so far\n"); } *terminationtype = 7; break; } if( (ae_fp_less_eq(egap,state->epsgap)&&ae_fp_less_eq(eprimal,state->epsp))&&ae_fp_less_eq(edual,state->epsd) ) { if( dotrace ) { ae_trace("> stopping criteria are met\n"); } *terminationtype = 1; break; } if( primalstagnationcnt>=vipmsolver_primalstagnationlen ) { if( dotrace ) { 
ae_trace("> primal error stagnated for %0d its, stopping at best point found so far\n", (int)(vipmsolver_primalstagnationlen)); } *terminationtype = 7; break; } if( dualstagnationcnt>=vipmsolver_dualstagnationlen ) { if( dotrace ) { ae_trace("> dual error stagnated for %0d its, declaring unboundedness\n", (int)(vipmsolver_dualstagnationlen)); } *terminationtype = -4; break; } if( ae_fp_greater_eq(vipmsolver_nrminf(&state->current.y, m, _state),vipmsolver_bigy) ) { if( dotrace ) { ae_trace("> |Y| increased beyond %0.1e, declaring infeasibility\n", (double)(vipmsolver_bigy)); } *terminationtype = -3; break; } } /* * Output */ for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = state->current.x.ptr.p_double[i]; lagbc->ptr.p_double[i] = 0.0; if( state->hasbndl.ptr.p_bool[i] ) { lagbc->ptr.p_double[i] = lagbc->ptr.p_double[i]-state->current.z.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { lagbc->ptr.p_double[i] = lagbc->ptr.p_double[i]+state->current.s.ptr.p_double[i]; } } for(i=0; i<=m-1; i++) { laglc->ptr.p_double[i] = -state->current.y.ptr.p_double[i]; } /* * Unscale point and Lagrange multipliers */ unscaleunshiftpointbc(&state->scl, &state->xorigin, &state->rawbndl, &state->rawbndu, &state->bndl, &state->bndu, &state->hasbndl, &state->hasbndu, xs, n, _state); for(i=0; i<=n-1; i++) { lagbc->ptr.p_double[i] = lagbc->ptr.p_double[i]*state->targetscale/state->scl.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { laglc->ptr.p_double[i] = laglc->ptr.p_double[i]*state->targetscale/coalesce(state->ascales.ptr.p_double[i], 1.0, _state); } } /************************************************************************* Allocates place for variables of Vanderbei IPM and fills by zeros. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_varsinitbyzero(vipmvars* vstate, ae_int_t n, ae_int_t m, ae_state *_state) { ae_int_t i; ae_assert(n>=1, "VarsInitByZero: N<1", _state); ae_assert(m>=0, "VarsInitByZero: M<0", _state); vstate->n = n; vstate->m = m; rvectorsetlengthatleast(&vstate->x, n, _state); rvectorsetlengthatleast(&vstate->g, n, _state); rvectorsetlengthatleast(&vstate->t, n, _state); rvectorsetlengthatleast(&vstate->w, m, _state); rvectorsetlengthatleast(&vstate->p, m, _state); rvectorsetlengthatleast(&vstate->z, n, _state); rvectorsetlengthatleast(&vstate->s, n, _state); rvectorsetlengthatleast(&vstate->y, m, _state); rvectorsetlengthatleast(&vstate->v, m, _state); rvectorsetlengthatleast(&vstate->q, m, _state); for(i=0; i<=n-1; i++) { vstate->x.ptr.p_double[i] = (double)(0); vstate->g.ptr.p_double[i] = (double)(0); vstate->t.ptr.p_double[i] = (double)(0); vstate->z.ptr.p_double[i] = (double)(0); vstate->s.ptr.p_double[i] = (double)(0); } for(i=0; i<=m-1; i++) { vstate->w.ptr.p_double[i] = (double)(0); vstate->p.ptr.p_double[i] = (double)(0); vstate->y.ptr.p_double[i] = (double)(0); vstate->v.ptr.p_double[i] = (double)(0); vstate->q.ptr.p_double[i] = (double)(0); } } /************************************************************************* Allocates place for variables of Vanderbei IPM and fills them by values of the source -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_varsinitfrom(vipmvars* vstate, vipmvars* vsrc, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t m; n = vsrc->n; m = vsrc->m; ae_assert(n>=1, "VarsInitFrom: N<1", _state); ae_assert(m>=0, "VarsInitFrom: M<0", _state); vstate->n = 
n; vstate->m = m; rvectorsetlengthatleast(&vstate->x, n, _state); rvectorsetlengthatleast(&vstate->g, n, _state); rvectorsetlengthatleast(&vstate->t, n, _state); rvectorsetlengthatleast(&vstate->w, m, _state); rvectorsetlengthatleast(&vstate->p, m, _state); rvectorsetlengthatleast(&vstate->z, n, _state); rvectorsetlengthatleast(&vstate->s, n, _state); rvectorsetlengthatleast(&vstate->y, m, _state); rvectorsetlengthatleast(&vstate->v, m, _state); rvectorsetlengthatleast(&vstate->q, m, _state); for(i=0; i<=n-1; i++) { vstate->x.ptr.p_double[i] = vsrc->x.ptr.p_double[i]; vstate->g.ptr.p_double[i] = vsrc->g.ptr.p_double[i]; vstate->t.ptr.p_double[i] = vsrc->t.ptr.p_double[i]; vstate->z.ptr.p_double[i] = vsrc->z.ptr.p_double[i]; vstate->s.ptr.p_double[i] = vsrc->s.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { vstate->w.ptr.p_double[i] = vsrc->w.ptr.p_double[i]; vstate->p.ptr.p_double[i] = vsrc->p.ptr.p_double[i]; vstate->y.ptr.p_double[i] = vsrc->y.ptr.p_double[i]; vstate->v.ptr.p_double[i] = vsrc->v.ptr.p_double[i]; vstate->q.ptr.p_double[i] = vsrc->q.ptr.p_double[i]; } } /************************************************************************* Adds to Vanderbei variables direction vector times step length. Different lengths are used for primal and dual steps. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_varsaddstep(vipmvars* vstate, vipmvars* vdir, double stpp, double stpd, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t m; n = vstate->n; m = vstate->m; ae_assert(n>=1, "VarsAddStep: N<1", _state); ae_assert(m>=0, "VarsAddStep: M<0", _state); ae_assert(n==vdir->n, "VarsAddStep: sizes mismatch", _state); ae_assert(m==vdir->m, "VarsAddStep: sizes mismatch", _state); for(i=0; i<=n-1; i++) { vstate->x.ptr.p_double[i] = vstate->x.ptr.p_double[i]+stpp*vdir->x.ptr.p_double[i]; vstate->g.ptr.p_double[i] = vstate->g.ptr.p_double[i]+stpp*vdir->g.ptr.p_double[i]; vstate->t.ptr.p_double[i] = vstate->t.ptr.p_double[i]+stpp*vdir->t.ptr.p_double[i]; vstate->z.ptr.p_double[i] = vstate->z.ptr.p_double[i]+stpd*vdir->z.ptr.p_double[i]; vstate->s.ptr.p_double[i] = vstate->s.ptr.p_double[i]+stpd*vdir->s.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { vstate->w.ptr.p_double[i] = vstate->w.ptr.p_double[i]+stpp*vdir->w.ptr.p_double[i]; vstate->p.ptr.p_double[i] = vstate->p.ptr.p_double[i]+stpp*vdir->p.ptr.p_double[i]; vstate->y.ptr.p_double[i] = vstate->y.ptr.p_double[i]+stpd*vdir->y.ptr.p_double[i]; vstate->v.ptr.p_double[i] = vstate->v.ptr.p_double[i]+stpd*vdir->v.ptr.p_double[i]; vstate->q.ptr.p_double[i] = vstate->q.ptr.p_double[i]+stpd*vdir->q.ptr.p_double[i]; } } /************************************************************************* Computes complementarity gap -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static double vipmsolver_varscomputecomplementaritygap(vipmvars* vstate, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t m; double result; n = vstate->n; m = vstate->m; result = (double)(0); for(i=0; i<=n-1; i++) { result = result+vstate->z.ptr.p_double[i]*vstate->g.ptr.p_double[i]+vstate->s.ptr.p_double[i]*vstate->t.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { result = result+vstate->v.ptr.p_double[i]*vstate->w.ptr.p_double[i]+vstate->p.ptr.p_double[i]*vstate->q.ptr.p_double[i]; } return result; } /************************************************************************* Computes empirical value of the barrier 
parameter Mu -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static double vipmsolver_varscomputemu(vipmvars* vstate, ae_state *_state) { ae_int_t i; ae_int_t k; double result; k = 0; for(i=0; i<=vstate->n-1; i++) { if( !(vstate->z.ptr.p_double[i]*vstate->g.ptr.p_double[i]==0.0) ) { k = k+1; } if( !(vstate->s.ptr.p_double[i]*vstate->t.ptr.p_double[i]==0.0) ) { k = k+1; } } for(i=0; i<=vstate->m-1; i++) { if( !(vstate->v.ptr.p_double[i]*vstate->w.ptr.p_double[i]==0.0) ) { k = k+1; } if( !(vstate->p.ptr.p_double[i]*vstate->q.ptr.p_double[i]==0.0) ) { k = k+1; } } result = vipmsolver_varscomputecomplementaritygap(vstate, _state)/coalesce((double)(k), (double)(1), _state); return result; } /************************************************************************* Initializes QP-IPM state and prepares it to receive quadratic/linear terms and constraints. The solver is configured to work internally with factorization FType INPUT PARAMETERS: State - solver state to be configured; previously allocated memory is reused as much as possible S - scale vector, array[N]: * I-th element contains scale of I-th variable, * S[I]>0 XOrigin - origin term, array[N]. Can be zero. The solver solves problem of the form > > min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) > The terms A and b (as well as constraints) will be specified later with separate calls. FType - factorization type: * 0 for dense NxN factorization (normal equations) * 1 for sparse (N+M)x(N+M) factorization -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipminit(vipmstate* state, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, ae_int_t nmain, ae_int_t ftype, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t nslack; ae_assert(n>=1, "VIPMInit: N<1", _state); ae_assert(isfinitevector(s, n, _state), "VIPMInit: S contains infinite or NaN elements", _state); ae_assert(isfinitevector(xorigin, n, _state), "VIPMInit: XOrigin contains infinite or NaN elements", _state); ae_assert(ftype==0||ftype==1, "VIPMInit: unexpected FType", _state); ae_assert(nmain>=1, "VIPMInit: NMain<1", _state); ae_assert(nmain<=n, "VIPMInit: NMain>N", _state); nslack = n-nmain; /* * Problem metrics and type */ state->regeps = ae_sqrt(ae_machineepsilon, _state); state->epsp = ae_sqrt(ae_machineepsilon, _state); state->epsd = ae_sqrt(ae_machineepsilon, _state); state->epsgap = ae_sqrt(ae_machineepsilon, _state); state->n = n; state->nmain = nmain; state->islinear = ae_true; state->factorizationtype = ftype; state->factorizationpresent = ae_false; state->factorizationpoweredup = ae_false; /* * Reports */ state->repiterationscount = 0; state->repncholesky = 0; /* * Scale and origin */ rvectorsetlengthatleast(&state->scl, n, _state); rvectorsetlengthatleast(&state->invscl, n, _state); rvectorsetlengthatleast(&state->xorigin, n, _state); for(i=0; i<=n-1; i++) { ae_assert(s->ptr.p_double[i]>0.0, "VIPMInit: S[i] is non-positive", _state); state->scl.ptr.p_double[i] = s->ptr.p_double[i]; state->invscl.ptr.p_double[i] = 1/s->ptr.p_double[i]; state->xorigin.ptr.p_double[i] = xorigin->ptr.p_double[i]; } state->targetscale = 1.0; /* * Linear and quadratic terms - default value */ rvectorsetlengthatleast(&state->c, n, _state); for(i=0; i<=n-1; i++) { state->c.ptr.p_double[i] = (double)(0); } state->hkind = -1; if( ftype==0 ) { /* * Dense quadratic term */ 
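/* * NOTE: only the lower triangle of DenseH is referenced by the solver; it is zero-initialized here, so the problem stays effectively linear (IsLinear=True, zero C) until VIPMSetQuadraticLinear() is called */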
rmatrixsetlengthatleast(&state->denseh, nmain, nmain, _state); for(i=0; i<=nmain-1; i++) { for(j=0; j<=i; j++) { state->denseh.ptr.pp_double[i][j] = (double)(0); } } state->hkind = 0; } if( ftype==1 ) { /* * Sparse quadratic term */ state->sparseh.matrixtype = 1; state->sparseh.m = n; state->sparseh.n = n; state->sparseh.ninitialized = n; ivectorsetlengthatleast(&state->sparseh.idx, n, _state); rvectorsetlengthatleast(&state->sparseh.vals, n, _state); ivectorsetlengthatleast(&state->sparseh.ridx, n+1, _state); for(i=0; i<=n-1; i++) { state->sparseh.idx.ptr.p_int[i] = i; state->sparseh.vals.ptr.p_double[i] = 0.0; state->sparseh.ridx.ptr.p_int[i] = i; } state->sparseh.ridx.ptr.p_int[n] = n; sparsecreatecrsinplace(&state->sparseh, _state); state->hkind = 1; } ae_assert(state->hkind>=0, "VIPMInit: integrity check failed", _state); /* * Box constraints - default values */ rvectorsetlengthatleast(&state->bndl, n, _state); rvectorsetlengthatleast(&state->bndu, n, _state); bvectorsetlengthatleast(&state->hasbndl, n, _state); bvectorsetlengthatleast(&state->hasbndu, n, _state); for(i=0; i<=n-1; i++) { state->hasbndl.ptr.p_bool[i] = ae_false; state->hasbndu.ptr.p_bool[i] = ae_false; state->bndl.ptr.p_double[i] = _state->v_neginf; state->bndu.ptr.p_double[i] = _state->v_posinf; } /* * Linear constraints - empty */ state->mdense = 0; state->msparse = 0; state->combinedaslack.m = 0; state->combinedaslack.n = nslack; state->sparseamain.m = 0; state->sparseamain.n = nmain; sparsecreatecrsinplace(&state->sparseamain, _state); sparsecreatecrsinplace(&state->combinedaslack, _state); } /************************************************************************* Returns INF-norm -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static double vipmsolver_nrminf(/* Real */ ae_vector* x, ae_int_t n, ae_state *_state) { ae_int_t i; double result; result = (double)(0); for(i=0; i<=n-1; i++) { result = ae_maxreal(result, ae_fabs(x->ptr.p_double[i], _state), _state); } return result; } /************************************************************************* This function computes initial point and loads it to State.Current -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipminitialpoint(vipmstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; double v; double priorcoeff; n = state->n; m = state->mdense+state->msparse; vipmsolver_varsinitbyzero(&state->current, n, m, _state); /* * Set up initial values of primal and dual variables X and Y by solving * modified KKT system which tries to enforce linear constraints (ignoring * box constraints for a while) subject to minimization of additional prior * term which moves solution towards some interior point. */ priorcoeff = 1.0; vipmsolver_vipmfactorize(state, 0.0, &state->diagd, 0.0, &state->diage, priorcoeff, priorcoeff, _state); ae_vector_set_length(&state->deltaxy, n+m, _state); for(i=0; i<=n-1; i++) { /* * Compute I-th component of the prior term. 
The prior shifts solution * towards interior point */ v = (double)(0); if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { v = 0.5*(state->bndl.ptr.p_double[i]+state->bndu.ptr.p_double[i]); } if( state->hasbndl.ptr.p_bool[i]&&!state->hasbndu.ptr.p_bool[i] ) { v = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&!state->hasbndl.ptr.p_bool[i] ) { v = state->bndu.ptr.p_double[i]; } /* * Right hand side */ state->deltaxy.ptr.p_double[i] = state->c.ptr.p_double[i]-priorcoeff*v; } for(i=0; i<=m-1; i++) { /* * Linear constraints are either at B[i] or B[I]+0.5*R[i] */ v = (double)(0); if( state->hasr.ptr.p_bool[i] ) { v = 0.5*state->r.ptr.p_double[i]; } state->deltaxy.ptr.p_double[n+i] = state->b.ptr.p_double[i]+v; } vipmsolver_vipmsolve(state, &state->deltaxy, _state); for(i=0; i<=n-1; i++) { state->current.x.ptr.p_double[i] = state->deltaxy.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { state->current.y.ptr.p_double[i] = state->deltaxy.ptr.p_double[n+i]; } /* * Set up default values of slacks. */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state->current.g.ptr.p_double[i] = vipmsolver_initslackval; } if( state->hasbndu.ptr.p_bool[i] ) { state->current.t.ptr.p_double[i] = vipmsolver_initslackval; } } for(i=0; i<=m-1; i++) { state->current.w.ptr.p_double[i] = vipmsolver_initslackval; if( state->hasr.ptr.p_bool[i] ) { state->current.p.ptr.p_double[i] = vipmsolver_initslackval; } } for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state->current.z.ptr.p_double[i] = vipmsolver_initslackval; } if( state->hasbndu.ptr.p_bool[i] ) { state->current.s.ptr.p_double[i] = vipmsolver_initslackval; } } for(i=0; i<=m-1; i++) { state->current.v.ptr.p_double[i] = vipmsolver_initslackval; if( state->hasr.ptr.p_bool[i] ) { state->current.q.ptr.p_double[i] = vipmsolver_initslackval; } } /* * Then, having (a) initial values of X[] and Y[i], and (b) default values of nonnegatively * constrained slacks try to deduce better initial guess for slacks. 
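The code below raises the box-constraint primal slacks G/T to at least the distance from X to the corresponding bound, their duals Z/S to at least |X[i]|, the linear-constraint slacks W/V to at least |Y[i]|, and P/Q to at least |R[i]-W[i]|, which keeps the initial complementarity products away from zero.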
*/ vipmsolver_vipmmultiply(state, &state->current.x, &state->current.y, &state->tmphx, &state->tmpax, &state->tmpaty, _state); for(i=0; i<=n-1; i++) { /* * Better initial guess for box constraint primal slacks */ if( state->hasbndl.ptr.p_bool[i] ) { state->current.g.ptr.p_double[i] = ae_maxreal(state->current.g.ptr.p_double[i], ae_fabs(state->current.x.ptr.p_double[i]-state->bndl.ptr.p_double[i], _state), _state); state->current.z.ptr.p_double[i] = ae_maxreal(state->current.z.ptr.p_double[i], ae_fabs(state->current.x.ptr.p_double[i], _state), _state); } if( state->hasbndu.ptr.p_bool[i] ) { state->current.t.ptr.p_double[i] = ae_maxreal(state->current.t.ptr.p_double[i], ae_fabs(state->bndu.ptr.p_double[i]-state->current.x.ptr.p_double[i], _state), _state); state->current.s.ptr.p_double[i] = ae_maxreal(state->current.s.ptr.p_double[i], ae_fabs(state->current.x.ptr.p_double[i], _state), _state); } } for(i=0; i<=m-1; i++) { /* * Better initial guess for linear constraint primal slacks */ state->current.w.ptr.p_double[i] = ae_maxreal(state->current.w.ptr.p_double[i], ae_fabs(state->current.y.ptr.p_double[i], _state), _state); state->current.v.ptr.p_double[i] = ae_maxreal(state->current.v.ptr.p_double[i], ae_fabs(state->current.y.ptr.p_double[i], _state), _state); if( state->hasr.ptr.p_bool[i] ) { state->current.p.ptr.p_double[i] = ae_maxreal(state->current.p.ptr.p_double[i], ae_fabs(state->r.ptr.p_double[i]-state->current.w.ptr.p_double[i], _state), _state); state->current.q.ptr.p_double[i] = ae_maxreal(state->current.q.ptr.p_double[i], ae_fabs(state->r.ptr.p_double[i]-state->current.w.ptr.p_double[i], _state), _state); } } } /************************************************************************* Computes target function 0.5*x'*H*x+c'*x -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static double vipmsolver_vipmtarget(vipmstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; ae_int_t nmain; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t j0; ae_int_t j1; double v; double result; n = state->n; nmain = state->nmain; ae_assert(state->hkind==0||state->hkind==1, "VIPMTarget: unexpected HKind", _state); result = (double)(0); /* * Dense */ if( state->hkind==0 ) { for(i=0; i<=nmain-1; i++) { for(j=0; j<=i-1; j++) { result = result+x->ptr.p_double[i]*state->denseh.ptr.pp_double[i][j]*x->ptr.p_double[j]; } result = result+0.5*x->ptr.p_double[i]*x->ptr.p_double[i]*state->denseh.ptr.pp_double[i][i]; } for(i=0; i<=n-1; i++) { result = result+state->c.ptr.p_double[i]*x->ptr.p_double[i]; } return result; } /* * Sparse */ if( state->hkind==1 ) { result = (double)(0); for(i=0; i<=n-1; i++) { result = result+state->c.ptr.p_double[i]*x->ptr.p_double[i]; j0 = state->sparseh.ridx.ptr.p_int[i]; j1 = state->sparseh.didx.ptr.p_int[i]-1; for(k=j0; k<=j1; k++) { v = state->sparseh.vals.ptr.p_double[k]; j = state->sparseh.idx.ptr.p_int[k]; result = result+v*x->ptr.p_double[i]*x->ptr.p_double[j]; } ae_assert(state->sparseh.uidx.ptr.p_int[i]!=state->sparseh.didx.ptr.p_int[i], "VIPMTarget: sparse diagonal not found", _state); v = state->sparseh.vals.ptr.p_double[state->sparseh.didx.ptr.p_int[i]]; result = result+0.5*v*x->ptr.p_double[i]*x->ptr.p_double[i]; } return result; } return result; } /************************************************************************* Computes products H*x, A*x, A^T*y -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ 
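/* * NOTE: the constraint matrix is stored as two stacked blocks - SparseAFull (rows 0..MSparse-1) on top of DenseAFull (rows MSparse..MSparse+MDense-1) - so A*x is assembled from both parts and A^T*y accumulates contributions from both; H*x uses either the dense lower-triangular DenseH or the sparse SparseH, depending on HKind */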
static void vipmsolver_vipmmultiply(vipmstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* y, /* Real */ ae_vector* hx, /* Real */ ae_vector* ax, /* Real */ ae_vector* aty, ae_state *_state) { ae_int_t n; ae_int_t nmain; ae_int_t m; ae_int_t mdense; ae_int_t msparse; ae_int_t i; n = state->n; nmain = state->nmain; m = state->mdense+state->msparse; mdense = state->mdense; msparse = state->msparse; /* * Allocate */ rvectorsetlengthatleast(hx, n, _state); rvectorsetlengthatleast(ax, m, _state); rvectorsetlengthatleast(aty, n, _state); /* * Compute A*x */ if( msparse>0 ) { sparsegemv(&state->sparseafull, 1.0, 0, x, 0, 0.0, ax, 0, _state); } if( mdense>0 ) { rmatrixgemv(mdense, n, 1.0, &state->denseafull, 0, 0, 0, x, 0, 0.0, ax, msparse, _state); } /* * Compute A^T*y */ for(i=0; i<=n-1; i++) { aty->ptr.p_double[i] = (double)(0); } if( msparse>0 ) { sparsegemv(&state->sparseafull, 1.0, 1, y, 0, 1.0, aty, 0, _state); } if( mdense>0 ) { rmatrixgemv(n, mdense, 1.0, &state->denseafull, 0, 0, 1, y, msparse, 1.0, aty, 0, _state); } /* * Compute H*x */ ae_assert(state->hkind==0||state->hkind==1, "VIPMMultiply: unexpected HKind", _state); if( state->hkind==0 ) { rmatrixsymv(nmain, 1.0, &state->denseh, 0, 0, ae_false, x, 0, 0.0, hx, 0, _state); for(i=nmain; i<=n-1; i++) { hx->ptr.p_double[i] = (double)(0); } } if( state->hkind==1 ) { ae_assert(state->sparseh.n==n&&state->sparseh.m==n, "VIPMMultiply: sparse H has incorrect size", _state); sparsesmv(&state->sparseh, ae_false, x, hx, _state); } } /************************************************************************* This function "powers up" factorization, i.e. prepares some important temporaries. It should be called once prior to the first call to VIPMInitialPoint() or VIPMFactorize(). Subsequent calls are possible (no actions will be taken), but not needed. 
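For the sparse factorization path (FactorizationType=1) powering up builds the sparsity
template of the KKT matrix, FactSparseKKTTmpl: the strictly lower triangle is assembled
from -H and from the sparse/dense constraint rows, a transposed copy supplies the strictly
upper triangle, and an explicit diagonal slot is reserved for every row (initialized with
-H[i][i] for the first N rows and zero otherwise) so that the D/E regularization terms can
later be written into a working copy by VIPMFactorize().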
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmpowerup(vipmstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t nnzhalfmax; ae_int_t nnzfull; ae_int_t offs; ae_int_t rowoffs; ae_int_t i; ae_int_t k; ae_int_t k0; ae_int_t k1; double v; ae_assert(state->factorizationtype==0||state->factorizationtype==1, "VIPMFactorize: unexpected factorization type", _state); n = state->n; m = state->mdense+state->msparse; /* * Skip if already powered up */ if( state->factorizationpoweredup ) { return; } /* * Only sparse factorization needs powering up */ if( state->factorizationtype==1 ) { ae_assert(state->hkind==1, "VIPMPowerUp: unexpected HKind", _state); nnzhalfmax = state->sparseh.ridx.ptr.p_int[n]; if( state->msparse>0 ) { nnzhalfmax = nnzhalfmax+state->sparseafull.ridx.ptr.p_int[state->msparse]; } if( state->mdense>0 ) { nnzhalfmax = nnzhalfmax+n*state->mdense; } /* * Prepare strictly lower triangle of template KKT matrix (KKT system without D and E * terms being added to diagonals) */ state->tmpsparse0.m = n+m; state->tmpsparse0.n = n+m; ivectorsetlengthatleast(&state->tmpsparse0.idx, nnzhalfmax, _state); rvectorsetlengthatleast(&state->tmpsparse0.vals, nnzhalfmax, _state); ivectorsetlengthatleast(&state->tmpsparse0.ridx, n+m+1, _state); state->tmpsparse0.ridx.ptr.p_int[0] = 0; offs = 0; rowoffs = 0; for(i=0; i<=n-1; i++) { k0 = state->sparseh.ridx.ptr.p_int[i]; k1 = state->sparseh.didx.ptr.p_int[i]-1; for(k=k0; k<=k1; k++) { state->tmpsparse0.idx.ptr.p_int[offs] = state->sparseh.idx.ptr.p_int[k]; state->tmpsparse0.vals.ptr.p_double[offs] = -state->sparseh.vals.ptr.p_double[k]; offs = offs+1; } rowoffs = rowoffs+1; state->tmpsparse0.ridx.ptr.p_int[rowoffs] = offs; } for(i=0; i<=state->msparse-1; i++) { k0 = state->sparseafull.ridx.ptr.p_int[i]; k1 = state->sparseafull.ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { state->tmpsparse0.idx.ptr.p_int[offs] = state->sparseafull.idx.ptr.p_int[k]; state->tmpsparse0.vals.ptr.p_double[offs] = state->sparseafull.vals.ptr.p_double[k]; offs = offs+1; } rowoffs = rowoffs+1; state->tmpsparse0.ridx.ptr.p_int[rowoffs] = offs; } for(i=0; i<=state->mdense-1; i++) { for(k=0; k<=n-1; k++) { if( state->denseafull.ptr.pp_double[i][k]!=0.0 ) { state->tmpsparse0.idx.ptr.p_int[offs] = k; state->tmpsparse0.vals.ptr.p_double[offs] = state->denseafull.ptr.pp_double[i][k]; offs = offs+1; } } rowoffs = rowoffs+1; state->tmpsparse0.ridx.ptr.p_int[rowoffs] = offs; } ae_assert(rowoffs==m+n, "VIPMPowerUp: critical integrity check failed", _state); ae_assert(offs<=nnzhalfmax, "VIPMPowerUp: critical integrity check failed", _state); sparsecreatecrsinplace(&state->tmpsparse0, _state); /* * Prepare strictly upper triangle of KKT system */ sparsecopytransposecrsbuf(&state->tmpsparse0, &state->tmpsparse1, _state); /* * Merge triangles */ nnzfull = 2*state->tmpsparse0.ridx.ptr.p_int[m+n]+(m+n); state->factsparsekkttmpl.m = m+n; state->factsparsekkttmpl.n = m+n; ivectorsetlengthatleast(&state->factsparsekkttmpl.idx, nnzfull, _state); rvectorsetlengthatleast(&state->factsparsekkttmpl.vals, nnzfull, _state); ivectorsetlengthatleast(&state->factsparsekkttmpl.ridx, n+m+1, _state); state->factsparsekkttmpl.ridx.ptr.p_int[0] = 0; offs = 0; for(i=0; i<=n+m-1; i++) { /* * Merge in lower triangle */ k0 = state->tmpsparse0.ridx.ptr.p_int[i]; k1 = state->tmpsparse0.ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { state->factsparsekkttmpl.idx.ptr.p_int[offs] = 
state->tmpsparse0.idx.ptr.p_int[k]; state->factsparsekkttmpl.vals.ptr.p_double[offs] = state->tmpsparse0.vals.ptr.p_double[k]; offs = offs+1; } /* * Merge in diagonal term */ v = 0.0; if( i<n ) { ae_assert(state->sparseh.didx.ptr.p_int[i]+1==state->sparseh.uidx.ptr.p_int[i], "VIPMPowerUp: critical integrity check failed for diagonal of H", _state); v = -state->sparseh.vals.ptr.p_double[state->sparseh.didx.ptr.p_int[i]]; } state->factsparsekkttmpl.idx.ptr.p_int[offs] = i; state->factsparsekkttmpl.vals.ptr.p_double[offs] = v; offs = offs+1; /* * Merge in upper triangle */ k0 = state->tmpsparse1.ridx.ptr.p_int[i]; k1 = state->tmpsparse1.ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { state->factsparsekkttmpl.idx.ptr.p_int[offs] = state->tmpsparse1.idx.ptr.p_int[k]; state->factsparsekkttmpl.vals.ptr.p_double[offs] = state->tmpsparse1.vals.ptr.p_double[k]; offs = offs+1; } /* * Finalize row */ state->factsparsekkttmpl.ridx.ptr.p_int[i+1] = offs; } ae_assert(offs==nnzfull, "VIPMPowerUp: critical integrity check failed", _state); sparsecreatecrsinplace(&state->factsparsekkttmpl, _state); } /* * Powered up! */ state->factorizationpoweredup = ae_true; } /************************************************************************* This function performs factorization of the modified KKT system ( | ) ( -(H+alpha0*D+alpha1*I) | A^T ) ( | ) (------------------------|-----------------) ( | ) ( A | beta0*E+beta1*I ) ( | ) where: * H is an NxN quadratic term * A is an MxN matrix of linear constraints * alpha0, alpha1, beta0, beta1 are nonnegative scalars * D and E are diagonal matrices with nonnegative entries (which are ignored if alpha0 and beta0 are zero - arrays are not referenced at all) * I is an NxN or MxM identity matrix -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmfactorize(vipmstate* state, double alpha0, /* Real */ ae_vector* d, double beta0, /* Real */ ae_vector* e, double alpha1, double beta1, ae_state *_state) { ae_int_t n; ae_int_t nmain; ae_int_t nslack; ae_int_t m; ae_int_t mdense; ae_int_t msparse; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t k0; ae_int_t k1; ae_int_t ka; ae_int_t kb; ae_int_t ja; ae_int_t jb; double va; double vb; double v; double vv; ae_assert(ae_isfinite(alpha0, _state)&&ae_fp_greater_eq(alpha0,(double)(0)), "VIPMFactorize: Alpha0 is infinite or negative", _state); ae_assert(ae_isfinite(alpha1, _state)&&ae_fp_greater_eq(alpha1,(double)(0)), "VIPMFactorize: Alpha1 is infinite or negative", _state); ae_assert(ae_isfinite(beta0, _state)&&ae_fp_greater_eq(beta0,(double)(0)), "VIPMFactorize: Beta0 is infinite or negative", _state); ae_assert(ae_isfinite(beta1, _state)&&ae_fp_greater_eq(beta1,(double)(0)), "VIPMFactorize: Beta1 is infinite or negative", _state); ae_assert(state->factorizationtype==0||state->factorizationtype==1, "VIPMFactorize: unexpected factorization type", _state); ae_assert(state->factorizationpoweredup, "VIPMFactorize: critical integrity check failed (no powerup stage)", _state); n = state->n; nmain = state->nmain; nslack = n-nmain; m = state->mdense+state->msparse; mdense = state->mdense; msparse = state->msparse; state->factorizationpresent = ae_false; /* * Dense NxN normal equations approach */ if( state->factorizationtype==0 ) { /* * A problem formulation with possible slacks.
* * We have to solve following system: * * [ -(H+Dh) Ah' ] [ Xh ] [ Bh ] * [ -Dz Az' ] [ Xz ] = [ Bz ] * [ Ah Az E ] [ Y ] [ By ] * * with Xh being NMain-dimensional vector, Xz being NSlack-dimensional vector, constraint * matrix A being divided into non-slack and slack parts Ah and Az (and Ah, in turn, being * divided into sparse and dense parts), Y being M-dimensional vector. * * NOTE: due to definition of slack variables following holds: for any diagonal matrix W * a product Az*W*Az' is a diagonal matrix. * * From the second line we get * * Xz = inv(Dz)*Az'*y - inv(Dz)*Bz * = inv(Dz)*Az'*y - BzWave * * Using this value for Zx, third line gives us * * Y = inv(E+Az*inv(Dz)*Az')*(By+Az*BzWave-Ah*Xh) * = inv(EWave)*(ByWave-Ah*Xh) * with EWave = E+Az*inv(Dz)*Az' and ByWave = By+Az*BzWave * * Finally, first line gives us * * Xh = -inv(H+Dh+Ah'*inv(EWave)*Ah)*(Bh-Ah'*inv(EWave)*ByWave) * = -inv(HWave)*BhWave * with HWave = H+Dh+Ah'*inv(EWave)*Ah and BhWave = Bh-Ah'*inv(EWave)*ByWave * * In order to prepare factorization we need to compute: * (a) diagonal matrices Dh and Dz (and precomputed inverse of Dz) * (b) EWave * (c) HWave * * First, we compute dense Dh and Dz */ rvectorsetlengthatleast(&state->factdh, nmain, _state); rvectorsetlengthatleast(&state->factdz, nslack, _state); rvectorsetlengthatleast(&state->factinvdz, nslack, _state); for(i=0; i<=n-1; i++) { v = (double)(0); if( ae_fp_greater(alpha0,(double)(0)) ) { v = v+alpha0*d->ptr.p_double[i]; } if( ae_fp_greater(alpha1,(double)(0)) ) { v = v+alpha1; } ae_assert(ae_fp_greater(v,(double)(0)), "VIPMFactorize: integrity check failed, degenerate diagonal matrix", _state); if( i>=nmain ) { state->factdz.ptr.p_double[i-nmain] = v; state->factinvdz.ptr.p_double[i-nmain] = 1/v; } else { state->factdh.ptr.p_double[i] = v; } } /* * Now we are ready to compute EWave */ rvectorsetlengthatleast(&state->facteffectivee, m, _state); for(i=0; i<=m-1; i++) { /* * Compute diagonal element of E */ v = (double)(0); if( ae_fp_greater(beta0,(double)(0)) ) { v = v+beta0*e->ptr.p_double[i]; } if( ae_fp_greater(beta1,(double)(0)) ) { v = v+beta1; } ae_assert(ae_fp_greater(v,(double)(0)), "VIPMFactorize: integrity check failed, degenerate diagonal matrix", _state); /* * Compute diagonal modification Az*inv(Dz)*Az' */ k0 = state->combinedaslack.ridx.ptr.p_int[i]; k1 = state->combinedaslack.ridx.ptr.p_int[i+1]-1; for(k=k0; k<=k1; k++) { vv = state->combinedaslack.vals.ptr.p_double[k]; v = v+vv*vv*state->factinvdz.ptr.p_double[state->combinedaslack.idx.ptr.p_int[k]]; } /* * Save EWave */ state->facteffectivee.ptr.p_double[i] = v; } /* * Now we are ready to compute HWave: * * store H * * add Dh * * add Ah'*inv(EWave)*Ah */ rmatrixsetlengthatleast(&state->factdensehaug, nmain, nmain, _state); ae_assert(state->hkind==0, "VIPMFactorize: unexpected HKind", _state); rmatrixcopy(nmain, nmain, &state->denseh, 0, 0, &state->factdensehaug, 0, 0, _state); for(i=0; i<=nmain-1; i++) { state->factdensehaug.ptr.pp_double[i][i] = state->factdensehaug.ptr.pp_double[i][i]+state->factdh.ptr.p_double[i]; } if( msparse>0 ) { /* * Handle sparse part of Ah in Ah'*inv(EWave)*Ah */ for(i=0; i<=msparse-1; i++) { v = 1.0/state->facteffectivee.ptr.p_double[i]; k0 = state->sparseamain.ridx.ptr.p_int[i]; k1 = state->sparseamain.ridx.ptr.p_int[i+1]-1; for(ka=k0; ka<=k1; ka++) { ja = state->sparseamain.idx.ptr.p_int[ka]; va = state->sparseamain.vals.ptr.p_double[ka]; for(kb=k0; kb<=ka; kb++) { jb = state->sparseamain.idx.ptr.p_int[kb]; vb = state->sparseamain.vals.ptr.p_double[kb]; 
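/* Accumulate v*va*vb (the [ja][jb] entry of Ah'*inv(EWave)*Ah contributed by sparse row i) into the lower triangle of HWave */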
state->factdensehaug.ptr.pp_double[ja][jb] = state->factdensehaug.ptr.pp_double[ja][jb]+v*va*vb; } } } } if( mdense>0 ) { /* * Handle dense part of Ah in Ah'*inv(EWave)*Ah */ rmatrixsetlengthatleast(&state->tmpr2, mdense, nmain, _state); rmatrixcopy(mdense, nmain, &state->denseamain, 0, 0, &state->tmpr2, 0, 0, _state); for(i=0; i<=mdense-1; i++) { v = 1.0/ae_sqrt(state->facteffectivee.ptr.p_double[msparse+i], _state); for(j=0; j<=nmain-1; j++) { state->tmpr2.ptr.pp_double[i][j] = v*state->tmpr2.ptr.pp_double[i][j]; } } rmatrixsyrk(nmain, mdense, 1.0, &state->tmpr2, 0, 0, 2, 1.0, &state->factdensehaug, 0, 0, ae_false, _state); } /* * Compute Cholesky factorization of HWave */ if( !spdmatrixcholesky(&state->factdensehaug, nmain, ae_false, _state) ) { ae_assert(ae_false, "VIPMFactorize: critical failure, nonconvex input detected", _state); } state->factorizationpresent = ae_true; } /* * Sparse (M+N)x(M+N) factorization */ if( state->factorizationtype==1 ) { sparsecopybuf(&state->factsparsekkttmpl, &state->factsparsekkt, _state); for(i=0; i<=n-1; i++) { ae_assert(state->factsparsekkt.didx.ptr.p_int[i]+1==state->factsparsekkt.uidx.ptr.p_int[i], "VIPMFactorize: integrity check failed, no diagonal element", _state); v = state->factsparsekkt.vals.ptr.p_double[state->factsparsekkt.didx.ptr.p_int[i]]; if( ae_fp_greater(alpha0,(double)(0)) ) { v = v-alpha0*d->ptr.p_double[i]; } if( ae_fp_greater(alpha1,(double)(0)) ) { v = v-alpha1; } state->factsparsekkt.vals.ptr.p_double[state->factsparsekkt.didx.ptr.p_int[i]] = v; } for(i=0; i<=msparse+mdense-1; i++) { ae_assert(state->factsparsekkt.didx.ptr.p_int[n+i]+1==state->factsparsekkt.uidx.ptr.p_int[n+i], "VIPMFactorize: integrity check failed, no diagonal element", _state); v = state->factsparsekkt.vals.ptr.p_double[state->factsparsekkt.didx.ptr.p_int[n+i]]; if( ae_fp_greater(beta0,(double)(0)) ) { v = v+beta0*e->ptr.p_double[i]; } if( ae_fp_greater(beta1,(double)(0)) ) { v = v+beta1; } ae_assert(ae_fp_greater(v,(double)(0)), "VIPMFactorize: integrity check failed, degenerate diagonal matrix", _state); state->factsparsekkt.vals.ptr.p_double[state->factsparsekkt.didx.ptr.p_int[n+i]] = v; } if( !sparselu(&state->factsparsekkt, 0, &state->factsparsekktpivp, &state->factsparsekktpivq, _state) ) { ae_assert(ae_false, "VIPMFactorize: critical failure, degenerate KKT system encountered", _state); } state->factorizationpresent = ae_true; } /* * Done, integrity control */ ae_assert(state->factorizationpresent, "VIPMFactorize: integrity check failed", _state); inc(&state->repncholesky, _state); } /************************************************************************* Solves KKT system whose factorization was prepared by VIPMFactorize(). On input, right-hand-side is stored in DeltaXY; on output, solution replaces DeltaXY. 
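The dense path (FactorizationType=0) eliminates the slack block analytically and then
back-substitutes with the Cholesky factor of HWave via two triangular solves; the sparse
path (FactorizationType=1) applies the LU row pivots, performs the two triangular solves
with the factors, and finally applies the column pivots in reverse order. A plain-C sketch
of the pivot-swap application pattern used by the sparse path (hypothetical names, not the
ALGLIB API):

    // Apply a pivot-swap array: forward for row pivots P, backward for column pivots Q
    static void apply_pivots(double *b, int len, const int *piv, int forward)
    {
        int i;
        double t;
        if( forward )
            for(i=0; i<len; i++)    { t = b[i]; b[i] = b[piv[i]]; b[piv[i]] = t; }
        else
            for(i=len-1; i>=0; i--) { t = b[i]; b[i] = b[piv[i]]; b[piv[i]] = t; }
    }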
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmsolve(vipmstate* state, /* Real */ ae_vector* deltaxy, ae_state *_state) { ae_int_t n; ae_int_t nmain; ae_int_t nslack; ae_int_t m; ae_int_t mdense; ae_int_t msparse; ae_int_t i; ae_int_t j; double v; ae_assert(state->factorizationpresent, "VIPMSolve: integrity check failed - factorization is not present", _state); ae_assert(state->factorizationtype==0||state->factorizationtype==1, "VIPMSolve: unexpected factorization type", _state); n = state->n; nmain = state->nmain; nslack = n-nmain; m = state->mdense+state->msparse; mdense = state->mdense; msparse = state->msparse; /* * Dense solving */ if( state->factorizationtype==0 ) { /* * Compute * * BzWave = inv(Dz)*Bz * ByWave = By+Az*BzWave * BhWave = Bh-Ah'*inv(EWave)*ByWave */ for(i=0; i<=nslack-1; i++) { deltaxy->ptr.p_double[nmain+i] = deltaxy->ptr.p_double[nmain+i]*state->factinvdz.ptr.p_double[i]; } sparsegemv(&state->combinedaslack, 1.0, 0, deltaxy, nmain, 1.0, deltaxy, n, _state); rvectorsetlengthatleast(&state->tmp1, m, _state); for(i=0; i<=m-1; i++) { state->tmp1.ptr.p_double[i] = deltaxy->ptr.p_double[n+i]/state->facteffectivee.ptr.p_double[i]; } sparsegemv(&state->sparseamain, -1.0, 1, &state->tmp1, 0, 1.0, deltaxy, 0, _state); rmatrixgemv(nmain, mdense, -1.0, &state->denseamain, 0, 0, 1, &state->tmp1, msparse, 1.0, deltaxy, 0, _state); /* * Compute Xh = -inv(HWave)*BhWave */ for(i=0; i<=nmain-1; i++) { state->deltaxy.ptr.p_double[i] = -state->deltaxy.ptr.p_double[i]; } rmatrixtrsv(nmain, &state->factdensehaug, 0, 0, ae_false, ae_false, 0, &state->deltaxy, 0, _state); rmatrixtrsv(nmain, &state->factdensehaug, 0, 0, ae_false, ae_false, 1, &state->deltaxy, 0, _state); /* * Compute Y = inv(EWave)*(ByWave-Ah*Xh) */ sparsegemv(&state->sparseamain, -1.0, 0, deltaxy, 0, 1.0, deltaxy, n, _state); rmatrixgemv(mdense, nmain, -1.0, &state->denseamain, 0, 0, 0, deltaxy, 0, 1.0, deltaxy, n+msparse, _state); for(i=0; i<=m-1; i++) { deltaxy->ptr.p_double[n+i] = deltaxy->ptr.p_double[n+i]/state->facteffectivee.ptr.p_double[i]; } /* * Compute Xz = -(BzWave - inv(Dz)*Az'*y) */ rvectorsetlengthatleast(&state->tmp0, nslack, _state); for(i=0; i<=nslack-1; i++) { state->tmp0.ptr.p_double[i] = (double)(0); } sparsegemv(&state->combinedaslack, 1.0, 1, deltaxy, n, 1.0, &state->tmp0, 0, _state); for(i=0; i<=nslack-1; i++) { state->deltaxy.ptr.p_double[nmain+i] = -(state->deltaxy.ptr.p_double[nmain+i]-state->factinvdz.ptr.p_double[i]*state->tmp0.ptr.p_double[i]); } /* * Done */ return; } /* * Sparse solving */ if( state->factorizationtype==1 ) { /* * Solve sparse KKT system given by its triangular factorization */ for(i=0; i<=n-1; i++) { ae_assert(state->factsparsekkt.didx.ptr.p_int[i]+1==state->factsparsekkt.uidx.ptr.p_int[i]&&state->factsparsekkt.vals.ptr.p_double[state->factsparsekkt.didx.ptr.p_int[i]]!=0.0, "VIPMSolve: degenerate KKT system encountered", _state); } for(i=0; i<=n+m-1; i++) { j = state->factsparsekktpivp.ptr.p_int[i]; v = deltaxy->ptr.p_double[i]; deltaxy->ptr.p_double[i] = deltaxy->ptr.p_double[j]; deltaxy->ptr.p_double[j] = v; } sparsetrsv(&state->factsparsekkt, ae_false, ae_true, 0, deltaxy, _state); sparsetrsv(&state->factsparsekkt, ae_true, ae_false, 0, deltaxy, _state); for(i=n+m-1; i>=0; i--) { j = state->factsparsekktpivq.ptr.p_int[i]; v = deltaxy->ptr.p_double[i]; deltaxy->ptr.p_double[i] = deltaxy->ptr.p_double[j]; deltaxy->ptr.p_double[j] = v; } /* * Done */ return; } /* * */ 
ae_assert(ae_false, "VIPMSolve: integrity check failed - unexpected factorization", _state); } /************************************************************************* Generates precomputed temporary vectors and KKT factorization at the beginning of the current iteration. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmprecomputenewtonfactorization(vipmstate* state, vipmvars* v0, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; n = state->n; m = state->mdense+state->msparse; rvectorsetlengthatleast(&state->diagginvz, n, _state); rvectorsetlengthatleast(&state->diagzinvg, n, _state); rvectorsetlengthatleast(&state->diagtinvs, n, _state); rvectorsetlengthatleast(&state->diagsinvt, n, _state); rvectorsetlengthatleast(&state->diagpinvq, m, _state); rvectorsetlengthatleast(&state->diagqinvp, m, _state); rvectorsetlengthatleast(&state->diagvinvw, m, _state); rvectorsetlengthatleast(&state->diagd, n, _state); rvectorsetlengthatleast(&state->diage, m, _state); /* * Handle temporary matrices arising due to box constraints */ for(i=0; i<=n-1; i++) { /* * Lower bound: G*inv(Z) and Z*inv(G) */ if( state->hasbndl.ptr.p_bool[i] ) { ae_assert(v0->g.ptr.p_double[i]>0.0&&v0->z.ptr.p_double[i]>0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - G[i]<=0 or Z[i]<=0", _state); state->diagginvz.ptr.p_double[i] = v0->g.ptr.p_double[i]/v0->z.ptr.p_double[i]; state->diagzinvg.ptr.p_double[i] = v0->z.ptr.p_double[i]/v0->g.ptr.p_double[i]; } else { ae_assert(v0->g.ptr.p_double[i]==0.0&&v0->z.ptr.p_double[i]==0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - G[i]<>0 or Z[i]<>0 for absent lower bound", _state); state->diagginvz.ptr.p_double[i] = 0.0; state->diagzinvg.ptr.p_double[i] = 0.0; } /* * Upper bound: T*inv(S) and S*inv(T) */ if( state->hasbndu.ptr.p_bool[i] ) { ae_assert(v0->t.ptr.p_double[i]>0.0&&v0->s.ptr.p_double[i]>0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - T[i]<=0 or S[i]<=0", _state); state->diagtinvs.ptr.p_double[i] = v0->t.ptr.p_double[i]/v0->s.ptr.p_double[i]; state->diagsinvt.ptr.p_double[i] = v0->s.ptr.p_double[i]/v0->t.ptr.p_double[i]; } else { ae_assert(v0->t.ptr.p_double[i]==0.0&&v0->s.ptr.p_double[i]==0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - T[i]<>0 or S[i]<>0 for absent upper bound", _state); state->diagtinvs.ptr.p_double[i] = 0.0; state->diagsinvt.ptr.p_double[i] = 0.0; } /* * Diagonal term D */ state->diagd.ptr.p_double[i] = state->diagzinvg.ptr.p_double[i]+state->diagsinvt.ptr.p_double[i]; } /* * Handle temporary matrices arising due to linear constraints: with lower bound B[] * or with lower and upper bounds. 
*/ for(i=0; i<=m-1; i++) { /* * Lower bound: always present */ ae_assert(v0->v.ptr.p_double[i]>0.0&&v0->w.ptr.p_double[i]>0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - V[i]<=0 or W[i]<=0", _state); state->diagvinvw.ptr.p_double[i] = v0->v.ptr.p_double[i]/v0->w.ptr.p_double[i]; /* * Upper bound */ if( state->hasr.ptr.p_bool[i] ) { ae_assert(v0->p.ptr.p_double[i]>0.0&&v0->q.ptr.p_double[i]>0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - P[i]<=0 or Q[i]<=0", _state); state->diagpinvq.ptr.p_double[i] = v0->p.ptr.p_double[i]/v0->q.ptr.p_double[i]; state->diagqinvp.ptr.p_double[i] = v0->q.ptr.p_double[i]/v0->p.ptr.p_double[i]; } else { ae_assert(v0->p.ptr.p_double[i]==0.0&&v0->q.ptr.p_double[i]==0.0, "VIPMPrecomputeNewtonFactorization: integrity failure - P[i]<>0 or Q[i]<>0 for absent range of linear constraint", _state); state->diagpinvq.ptr.p_double[i] = 0.0; state->diagqinvp.ptr.p_double[i] = 0.0; } /* * Diagonal term E */ state->diage.ptr.p_double[i] = 1/(state->diagvinvw.ptr.p_double[i]+state->diagqinvp.ptr.p_double[i]); } /* * Perform factorization */ vipmsolver_vipmfactorize(state, 1.0, &state->diagd, 1.0, &state->diage, state->regeps, state->regeps, _state); } /************************************************************************* Compute VIPM step by solving KKT system -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmcomputestepdirection(vipmstate* state, vipmvars* v0, double mu, vipmvars* vd, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; n = state->n; m = state->mdense+state->msparse; /* * Allocate */ rvectorsetlengthatleast(&state->rhssigma, n, _state); rvectorsetlengthatleast(&state->rhsbeta, m, _state); rvectorsetlengthatleast(&state->rhsrho, m, _state); rvectorsetlengthatleast(&state->rhsnu, n, _state); rvectorsetlengthatleast(&state->rhstau, n, _state); rvectorsetlengthatleast(&state->rhsalpha, m, _state); rvectorsetlengthatleast(&state->rhsgammaz, n, _state); rvectorsetlengthatleast(&state->rhsgammas, n, _state); rvectorsetlengthatleast(&state->rhsgammaw, m, _state); rvectorsetlengthatleast(&state->rhsgammaq, m, _state); rvectorsetlengthatleast(&state->rhsnucap, n, _state); rvectorsetlengthatleast(&state->rhstaucap, n, _state); rvectorsetlengthatleast(&state->rhsbetacap, m, _state); rvectorsetlengthatleast(&state->rhsalphacap, m, _state); /* * Compute products H*x, A*x, A^T*y * We compute these products in one location for the sake of simplicity. 
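 * These products feed the KKT residuals assembled below, e.g. RhsRho = b - A*x + w
 * (primal feasibility) and RhsSigma = c + H*x - A'*y - z + s (dual feasibility), which
 * form the right-hand side of the Newton system.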
*/ vipmsolver_vipmmultiply(state, &v0->x, &v0->y, &state->tmphx, &state->tmpax, &state->tmpaty, _state); /* * RhsRho = b - A*x + w */ for(i=0; i<=m-1; i++) { state->rhsrho.ptr.p_double[i] = state->b.ptr.p_double[i]-state->tmpax.ptr.p_double[i]+v0->w.ptr.p_double[i]; } /* * RhsNu = l - x + g */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { /* * Lower bound is present */ state->rhsnu.ptr.p_double[i] = state->bndl.ptr.p_double[i]-v0->x.ptr.p_double[i]+v0->g.ptr.p_double[i]; } else { /* * Lower bound is absent, g[i] = 0 */ ae_assert(v0->g.ptr.p_double[i]==0.0, "VIPMComputeStep: G[i]<>0 for absent constraint", _state); state->rhsnu.ptr.p_double[i] = (double)(0); } } /* * RhsTau = u - x - t */ for(i=0; i<=n-1; i++) { if( state->hasbndu.ptr.p_bool[i] ) { /* * Upper bound is present */ state->rhstau.ptr.p_double[i] = state->bndu.ptr.p_double[i]-v0->x.ptr.p_double[i]-v0->t.ptr.p_double[i]; } else { /* * Upper bound is absent, t[i] = 0 */ ae_assert(v0->t.ptr.p_double[i]==0.0, "VIPMComputeStep: T[i]<>0 for absent constraint", _state); state->rhstau.ptr.p_double[i] = (double)(0); } } /* * RhsAlpha = r - w - p */ for(i=0; i<=m-1; i++) { if( state->hasr.ptr.p_bool[i] ) { state->rhsalpha.ptr.p_double[i] = state->r.ptr.p_double[i]-v0->w.ptr.p_double[i]-v0->p.ptr.p_double[i]; } else { state->rhsalpha.ptr.p_double[i] = (double)(0); } } /* * RhsSigma = c - A^T*y - z + s + H*x */ for(i=0; i<=n-1; i++) { state->rhssigma.ptr.p_double[i] = state->c.ptr.p_double[i]-state->tmpaty.ptr.p_double[i]+state->tmphx.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i] ) { state->rhssigma.ptr.p_double[i] = state->rhssigma.ptr.p_double[i]-v0->z.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { state->rhssigma.ptr.p_double[i] = state->rhssigma.ptr.p_double[i]+v0->s.ptr.p_double[i]; } } /* * RhsBeta = y + q - v */ for(i=0; i<=m-1; i++) { state->rhsbeta.ptr.p_double[i] = v0->y.ptr.p_double[i]-v0->v.ptr.p_double[i]; if( state->hasr.ptr.p_bool[i] ) { state->rhsbeta.ptr.p_double[i] = state->rhsbeta.ptr.p_double[i]+v0->q.ptr.p_double[i]; } } /* * RhsGammaZ = mu*inv(G)*e - z - inv(G)*DELTAG*deltaZ */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { ae_assert(v0->g.ptr.p_double[i]>0.0, "VIPMComputeStep: G[i]<=0", _state); state->rhsgammaz.ptr.p_double[i] = mu/v0->g.ptr.p_double[i]-v0->z.ptr.p_double[i]-vd->g.ptr.p_double[i]*vd->z.ptr.p_double[i]/v0->g.ptr.p_double[i]; } else { ae_assert(v0->g.ptr.p_double[i]==0.0, "VIPMComputeStep: G[i]<>0 for absent constraint", _state); ae_assert(v0->z.ptr.p_double[i]==0.0, "VIPMComputeStep: Z[i]<>0 for absent constraint", _state); state->rhsgammaz.ptr.p_double[i] = (double)(0); } } /* * RhsGammaW = mu*inv(V)*e - w - inv(V)*DELTAV*deltaW */ for(i=0; i<=m-1; i++) { ae_assert(v0->v.ptr.p_double[i]>0.0, "VIPMComputeStep: V[i]<=0", _state); state->rhsgammaw.ptr.p_double[i] = mu/v0->v.ptr.p_double[i]-v0->w.ptr.p_double[i]-vd->v.ptr.p_double[i]*vd->w.ptr.p_double[i]/v0->v.ptr.p_double[i]; } /* * RhsGammaS = mu*inv(T)*e - s - inv(T)*DELTAT*deltaS */ for(i=0; i<=n-1; i++) { if( state->hasbndu.ptr.p_bool[i] ) { /* * Upper bound is present */ ae_assert(v0->t.ptr.p_double[i]>0.0, "VIPMComputeStep: T[i]<=0", _state); state->rhsgammas.ptr.p_double[i] = mu/v0->t.ptr.p_double[i]-v0->s.ptr.p_double[i]-vd->t.ptr.p_double[i]*vd->s.ptr.p_double[i]/v0->t.ptr.p_double[i]; } else { /* * Upper bound is absent */ ae_assert(v0->t.ptr.p_double[i]==0.0, "VIPMComputeStep: T[i]<>0 for absent constraint", _state); ae_assert(v0->s.ptr.p_double[i]==0.0, "VIPMComputeStep: S[i]<>0 for absent 
constraint", _state); state->rhsgammas.ptr.p_double[i] = (double)(0); } } /* * GammaQ = mu*inv(P)*e - q - inv(P)*DELTAP*deltaQ */ for(i=0; i<=m-1; i++) { if( state->hasr.ptr.p_bool[i] ) { ae_assert(v0->p.ptr.p_double[i]>0.0, "VIPMComputeStep: P[i]<=0", _state); state->rhsgammaq.ptr.p_double[i] = mu/v0->p.ptr.p_double[i]-v0->q.ptr.p_double[i]-vd->p.ptr.p_double[i]*vd->q.ptr.p_double[i]/v0->p.ptr.p_double[i]; } else { ae_assert(v0->p.ptr.p_double[i]==0.0, "VIPMComputeStep: P[i]<>0 for absent range", _state); ae_assert(v0->q.ptr.p_double[i]==0.0, "VIPMComputeStep: Q[i]<>0 for absent range", _state); state->rhsgammaq.ptr.p_double[i] = (double)(0); } } /* * RhsAlphaCap = RhsAlpha - PInvQ*GammaQ */ for(i=0; i<=m-1; i++) { state->rhsalphacap.ptr.p_double[i] = state->rhsalpha.ptr.p_double[i]-state->diagpinvq.ptr.p_double[i]*state->rhsgammaq.ptr.p_double[i]; } /* * RhsNuCap = RhsNu + GinvZ*GammaZ */ for(i=0; i<=n-1; i++) { state->rhsnucap.ptr.p_double[i] = state->rhsnu.ptr.p_double[i]+state->diagginvz.ptr.p_double[i]*state->rhsgammaz.ptr.p_double[i]; } /* * RhsTauCap = RhsTau - TInvS*GammaS */ for(i=0; i<=n-1; i++) { state->rhstaucap.ptr.p_double[i] = state->rhstau.ptr.p_double[i]-state->diagtinvs.ptr.p_double[i]*state->rhsgammas.ptr.p_double[i]; } /* * RhsBetaCap = RhsBeta - VInvW*GammaW */ for(i=0; i<=m-1; i++) { state->rhsbetacap.ptr.p_double[i] = state->rhsbeta.ptr.p_double[i]-state->diagvinvw.ptr.p_double[i]*state->rhsgammaw.ptr.p_double[i]; } /* * Solve KKT system */ ae_vector_set_length(&state->deltaxy, n+m, _state); for(i=0; i<=n-1; i++) { state->deltaxy.ptr.p_double[i] = state->rhssigma.ptr.p_double[i]-state->diagzinvg.ptr.p_double[i]*state->rhsnucap.ptr.p_double[i]-state->diagsinvt.ptr.p_double[i]*state->rhstaucap.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { state->deltaxy.ptr.p_double[n+i] = state->rhsrho.ptr.p_double[i]-state->diage.ptr.p_double[i]*(state->rhsbetacap.ptr.p_double[i]-state->diagqinvp.ptr.p_double[i]*state->rhsalphacap.ptr.p_double[i]); } vipmsolver_vipmsolve(state, &state->deltaxy, _state); /* * Perform backsubstitution */ for(i=0; i<=n-1; i++) { vd->x.ptr.p_double[i] = state->deltaxy.ptr.p_double[i]; vd->s.ptr.p_double[i] = state->diagsinvt.ptr.p_double[i]*(vd->x.ptr.p_double[i]-state->rhstaucap.ptr.p_double[i]); vd->z.ptr.p_double[i] = state->diagzinvg.ptr.p_double[i]*(state->rhsnucap.ptr.p_double[i]-vd->x.ptr.p_double[i]); vd->g.ptr.p_double[i] = state->diagginvz.ptr.p_double[i]*(state->rhsgammaz.ptr.p_double[i]-vd->z.ptr.p_double[i]); vd->t.ptr.p_double[i] = state->diagtinvs.ptr.p_double[i]*(state->rhsgammas.ptr.p_double[i]-vd->s.ptr.p_double[i]); } for(i=0; i<=m-1; i++) { vd->y.ptr.p_double[i] = state->deltaxy.ptr.p_double[n+i]; vd->w.ptr.p_double[i] = -state->diage.ptr.p_double[i]*(state->rhsbetacap.ptr.p_double[i]-state->diagqinvp.ptr.p_double[i]*state->rhsalphacap.ptr.p_double[i]+vd->y.ptr.p_double[i]); vd->q.ptr.p_double[i] = state->diagqinvp.ptr.p_double[i]*(vd->w.ptr.p_double[i]-state->rhsalphacap.ptr.p_double[i]); vd->v.ptr.p_double[i] = state->diagvinvw.ptr.p_double[i]*(state->rhsgammaw.ptr.p_double[i]-vd->w.ptr.p_double[i]); vd->p.ptr.p_double[i] = state->diagpinvq.ptr.p_double[i]*(state->rhsgammaq.ptr.p_double[i]-vd->q.ptr.p_double[i]); } } /************************************************************************* This function estimates primal and dual step lengths (subject to step decay parameter, which should be in [0,1] range). Current version returns same step lengths for primal and dual steps. 
INPUT PARAMETERS: State - solver state V0 - current point (we ignore one stored in State.Current) VS - step direction StepDecay - decay parameter, the step is multiplied by this coefficient. 1.0 corresponds to full step length being returned. Values in (0,1] range. OUTPUT PARAMETERS: AlphaP - primal step (after applying decay coefficient) AlphaD - dual step (after applying decay coefficient) -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmcomputesteplength(vipmstate* state, vipmvars* v0, vipmvars* vs, double stepdecay, double* alphap, double* alphad, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; double alpha; *alphap = 0; *alphad = 0; n = state->n; m = state->mdense+state->msparse; ae_assert(n==v0->n&&m==v0->m, "VIPMComputeStepLength: sizes mismatch", _state); *alphap = (double)(1); *alphad = (double)(1); for(i=0; i<=n-1; i++) { /* * Primal */ if( state->hasbndl.ptr.p_bool[i] ) { *alphap = safeminposrv(v0->g.ptr.p_double[i], -vs->g.ptr.p_double[i], *alphap, _state); } if( state->hasbndu.ptr.p_bool[i] ) { *alphap = safeminposrv(v0->t.ptr.p_double[i], -vs->t.ptr.p_double[i], *alphap, _state); } /* * Dual */ *alphad = safeminposrv(v0->z.ptr.p_double[i], -vs->z.ptr.p_double[i], *alphad, _state); *alphad = safeminposrv(v0->s.ptr.p_double[i], -vs->s.ptr.p_double[i], *alphad, _state); } for(i=0; i<=m-1; i++) { /* * Primal */ *alphap = safeminposrv(v0->w.ptr.p_double[i], -vs->w.ptr.p_double[i], *alphap, _state); *alphap = safeminposrv(v0->p.ptr.p_double[i], -vs->p.ptr.p_double[i], *alphap, _state); /* * Dual */ *alphad = safeminposrv(v0->v.ptr.p_double[i], -vs->v.ptr.p_double[i], *alphad, _state); *alphad = safeminposrv(v0->q.ptr.p_double[i], -vs->q.ptr.p_double[i], *alphad, _state); } /* * Because we solve QP problem, step length have to be same for primal and dual variables */ alpha = ae_minreal(*alphap, *alphad, _state); /* * Apply decay */ *alphap = stepdecay*alpha; *alphad = stepdecay*alpha; } /************************************************************************* Evaluate progress so far, sets following fields of State: * ErrP and ErrD are set to primal and dual infeasibilities computed in L2 norm Also outputs trace data, if requested to do so. 
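Note that ErrP and ErrD are reported as root-mean-square values (the accumulated squared
residual is divided by the number of contributing terms before the square root is taken),
while the corresponding infinity norms are tracked separately in ErrPInf and ErrDInf.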
-- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ static void vipmsolver_vipmevaluateprogress(vipmstate* state, ae_bool dotrace, ae_bool dodetailedtrace, double mu, double muaff, double sigma, double alphap, double alphad, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; double v; ae_int_t cntp2; ae_int_t cntd2; n = state->n; m = state->mdense+state->msparse; rvectorsetlengthatleast(&state->evalprogressg, n, _state); /* * Compute primal and dual infeasibilities */ vipmsolver_vipmmultiply(state, &state->current.x, &state->current.y, &state->tmphx, &state->tmpax, &state->tmpaty, _state); cntp2 = 0; state->errp2 = (double)(0); state->errpinf = (double)(0); for(i=0; i<=m-1; i++) { v = state->tmpax.ptr.p_double[i]-state->current.w.ptr.p_double[i]-state->b.ptr.p_double[i]; state->errp2 = state->errp2+v*v; state->errpinf = ae_maxreal(state->errpinf, ae_fabs(v, _state), _state); inc(&cntp2, _state); if( state->hasr.ptr.p_bool[i] ) { v = state->current.w.ptr.p_double[i]+state->current.p.ptr.p_double[i]-state->r.ptr.p_double[i]; state->errp2 = state->errp2+v*v; state->errpinf = ae_maxreal(state->errpinf, ae_fabs(v, _state), _state); inc(&cntp2, _state); } } for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { v = state->current.x.ptr.p_double[i]-state->current.g.ptr.p_double[i]-state->bndl.ptr.p_double[i]; state->errp2 = state->errp2+v*v; state->errpinf = ae_maxreal(state->errpinf, ae_fabs(v, _state), _state); inc(&cntp2, _state); } if( state->hasbndu.ptr.p_bool[i] ) { v = state->current.x.ptr.p_double[i]+state->current.t.ptr.p_double[i]-state->bndu.ptr.p_double[i]; state->errp2 = state->errp2+v*v; state->errpinf = ae_maxreal(state->errpinf, ae_fabs(v, _state), _state); inc(&cntp2, _state); } } state->errp2 = ae_sqrt(state->errp2/coalesce((double)(cntp2), (double)(1), _state), _state); cntd2 = 0; state->errd2 = (double)(0); state->errdinf = (double)(0); for(i=0; i<=n-1; i++) { v = state->tmphx.ptr.p_double[i]+state->c.ptr.p_double[i]-state->tmpaty.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i] ) { v = v-state->current.z.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { v = v+state->current.s.ptr.p_double[i]; } state->evalprogressg.ptr.p_double[i] = v; state->errd2 = state->errd2+v*v; state->errdinf = ae_maxreal(state->errdinf, ae_fabs(v, _state), _state); inc(&cntd2, _state); } for(i=0; i<=m-1; i++) { v = state->current.y.ptr.p_double[i]-state->current.v.ptr.p_double[i]; if( state->hasr.ptr.p_bool[i] ) { v = v+state->current.q.ptr.p_double[i]; } state->errd2 = state->errd2+v*v; state->errdinf = ae_maxreal(state->errdinf, ae_fabs(v, _state), _state); inc(&cntd2, _state); } state->errd2 = ae_sqrt(state->errd2/coalesce((double)(cntd2), (double)(1), _state), _state); /* * Trace */ if( dotrace ) { ae_trace("mu_init = %0.3e (at the beginning)\n", (double)(mu)); ae_trace("mu_aff = %0.3e (by affine scaling step)\n", (double)(muaff)); ae_trace("sigma = %0.3e (centering parameter)\n", (double)(sigma)); ae_trace("errP = %0.3e (primal infeasibility, 2-norm)\n", (double)(state->errp2)); ae_trace("errD = %0.3e (dual infeasibility, 2-norm)\n", (double)(state->errd2)); ae_trace("alphaP = %0.3e (primal step)\n", (double)(alphap)); ae_trace("alphaD = %0.3e (dual step)\n", (double)(alphad)); ae_trace("|X|=%0.1e, |G|=%0.1e, |W|=%0.1e, |T|=%0.1e, |P|=%0.1e\n", (double)(vipmsolver_nrminf(&state->current.x, n, _state)), (double)(vipmsolver_nrminf(&state->current.g, n, _state)), 
(double)(vipmsolver_nrminf(&state->current.w, m, _state)), (double)(vipmsolver_nrminf(&state->current.t, n, _state)), (double)(vipmsolver_nrminf(&state->current.p, m, _state))); ae_trace("|Y|=%0.1e, |Z|=%0.1e, |V|=%0.1e, |S|=%0.1e, |Q|=%0.1e\n", (double)(vipmsolver_nrminf(&state->current.y, m, _state)), (double)(vipmsolver_nrminf(&state->current.z, n, _state)), (double)(vipmsolver_nrminf(&state->current.v, m, _state)), (double)(vipmsolver_nrminf(&state->current.s, n, _state)), (double)(vipmsolver_nrminf(&state->current.q, m, _state))); if( dodetailedtrace ) { ae_trace("--- printing raw data (prior to applying variable scales and shifting by XOrigin) ------------------\n"); ae_trace("X (raw) = "); tracevectorunscaledunshiftedautoprec(&state->current.x, n, &state->scl, ae_true, &state->xorigin, ae_true, _state); ae_trace("\n"); ae_trace("--- printing scaled data (after applying variable scales and shifting by XOrigin) ------------------\n"); ae_trace("> reporting X, Lagrangian gradient\n"); ae_trace("Xnew = "); tracevectorautoprec(&state->current.x, 0, n, _state); ae_trace("\n"); ae_trace("Lag-grad = "); tracevectorautoprec(&state->evalprogressg, 0, n, _state); ae_trace("\n"); ae_trace("> primal slacks and dual multipliers for box constraints\n"); ae_trace("G (L prim slck) = "); tracevectorautoprec(&state->current.g, 0, n, _state); ae_trace("\n"); ae_trace("Z (L dual mult) = "); tracevectorautoprec(&state->current.z, 0, n, _state); ae_trace("\n"); ae_trace("T (U prim slck) = "); tracevectorautoprec(&state->current.t, 0, n, _state); ae_trace("\n"); ae_trace("S (U dual mult) = "); tracevectorautoprec(&state->current.s, 0, n, _state); ae_trace("\n"); ae_trace("> primal slacks and dual multipliers for linear constraints, B/R stand for B<=Ax<=B+R\n"); ae_trace("Y (lag mult) = "); tracevectorautoprec(&state->current.y, 0, m, _state); ae_trace("\n"); ae_trace("W (B prim slck) = "); tracevectorautoprec(&state->current.w, 0, m, _state); ae_trace("\n"); ae_trace("V (B dual mult) = "); tracevectorautoprec(&state->current.v, 0, m, _state); ae_trace("\n"); ae_trace("P (R prim slck) = "); tracevectorautoprec(&state->current.p, 0, m, _state); ae_trace("\n"); ae_trace("Q (R dual mult) = "); tracevectorautoprec(&state->current.q, 0, m, _state); ae_trace("\n"); } ae_trace("\n"); } } void _vipmvars_init(void* _p, ae_state *_state, ae_bool make_automatic) { vipmvars *p = (vipmvars*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->w, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->t, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->p, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->y, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->z, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->v, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->q, 0, DT_REAL, _state, make_automatic); } void _vipmvars_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { vipmvars *dst = (vipmvars*)_dst; vipmvars *src = (vipmvars*)_src; dst->n = src->n; dst->m = src->m; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); ae_vector_init_copy(&dst->w, &src->w, _state, make_automatic); ae_vector_init_copy(&dst->t, &src->t, _state, make_automatic); ae_vector_init_copy(&dst->p, &src->p, _state, make_automatic); 
ae_vector_init_copy(&dst->y, &src->y, _state, make_automatic); ae_vector_init_copy(&dst->z, &src->z, _state, make_automatic); ae_vector_init_copy(&dst->v, &src->v, _state, make_automatic); ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_vector_init_copy(&dst->q, &src->q, _state, make_automatic); } void _vipmvars_clear(void* _p) { vipmvars *p = (vipmvars*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->x); ae_vector_clear(&p->g); ae_vector_clear(&p->w); ae_vector_clear(&p->t); ae_vector_clear(&p->p); ae_vector_clear(&p->y); ae_vector_clear(&p->z); ae_vector_clear(&p->v); ae_vector_clear(&p->s); ae_vector_clear(&p->q); } void _vipmvars_destroy(void* _p) { vipmvars *p = (vipmvars*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); ae_vector_destroy(&p->w); ae_vector_destroy(&p->t); ae_vector_destroy(&p->p); ae_vector_destroy(&p->y); ae_vector_destroy(&p->z); ae_vector_destroy(&p->v); ae_vector_destroy(&p->s); ae_vector_destroy(&p->q); } void _vipmstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { vipmstate *p = (vipmstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->scl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invscl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xorigin, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->c, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->denseh, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparseh, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rawbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rawbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->denseafull, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->denseamain, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparseafull, _state, make_automatic); _sparsematrix_init(&p->sparseamain, _state, make_automatic); _sparsematrix_init(&p->combinedaslack, _state, make_automatic); ae_vector_init(&p->ascales, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->r, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasr, 0, DT_BOOL, _state, make_automatic); _vipmvars_init(&p->current, _state, make_automatic); _vipmvars_init(&p->trial, _state, make_automatic); _vipmvars_init(&p->deltaaff, _state, make_automatic); _vipmvars_init(&p->deltacorr, _state, make_automatic); ae_vector_init(&p->diagginvz, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagzinvg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagtinvs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagsinvt, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagvinvw, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagpinvq, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagqinvp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagd, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diage, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->factdensehaug, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->factdh, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->factdz, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->factinvdz, 0, DT_REAL, _state, make_automatic); 
ae_vector_init(&p->facteffectivee, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->factsparsekkttmpl, _state, make_automatic); _sparsematrix_init(&p->factsparsekkt, _state, make_automatic); ae_vector_init(&p->factsparsekktpivp, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->factsparsekktpivq, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->rhsrho, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsnu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhstau, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsalpha, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhssigma, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsbeta, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsgammaz, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsgammas, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsgammaw, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsgammaq, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsalphacap, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsbetacap, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhsnucap, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhstaucap, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->deltaxy, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmphx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpax, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpaty, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dummyr, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp2, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmpr2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->evalprogressg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpi, 0, DT_INT, _state, make_automatic); _sparsematrix_init(&p->tmpsparse0, _state, make_automatic); _sparsematrix_init(&p->tmpsparse1, _state, make_automatic); } void _vipmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { vipmstate *dst = (vipmstate*)_dst; vipmstate *src = (vipmstate*)_src; dst->n = src->n; dst->nmain = src->nmain; dst->regeps = src->regeps; dst->epsp = src->epsp; dst->epsd = src->epsd; dst->epsgap = src->epsgap; dst->islinear = src->islinear; ae_vector_init_copy(&dst->scl, &src->scl, _state, make_automatic); ae_vector_init_copy(&dst->invscl, &src->invscl, _state, make_automatic); ae_vector_init_copy(&dst->xorigin, &src->xorigin, _state, make_automatic); dst->targetscale = src->targetscale; ae_vector_init_copy(&dst->c, &src->c, _state, make_automatic); ae_matrix_init_copy(&dst->denseh, &src->denseh, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseh, &src->sparseh, _state, make_automatic); dst->hkind = src->hkind; ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_vector_init_copy(&dst->rawbndl, &src->rawbndl, _state, make_automatic); ae_vector_init_copy(&dst->rawbndu, &src->rawbndu, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_matrix_init_copy(&dst->denseafull, &src->denseafull, _state, make_automatic); ae_matrix_init_copy(&dst->denseamain, &src->denseamain, _state, make_automatic); 
_sparsematrix_init_copy(&dst->sparseafull, &src->sparseafull, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseamain, &src->sparseamain, _state, make_automatic); _sparsematrix_init_copy(&dst->combinedaslack, &src->combinedaslack, _state, make_automatic); ae_vector_init_copy(&dst->ascales, &src->ascales, _state, make_automatic); ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic); ae_vector_init_copy(&dst->r, &src->r, _state, make_automatic); ae_vector_init_copy(&dst->hasr, &src->hasr, _state, make_automatic); dst->mdense = src->mdense; dst->msparse = src->msparse; _vipmvars_init_copy(&dst->current, &src->current, _state, make_automatic); _vipmvars_init_copy(&dst->trial, &src->trial, _state, make_automatic); _vipmvars_init_copy(&dst->deltaaff, &src->deltaaff, _state, make_automatic); _vipmvars_init_copy(&dst->deltacorr, &src->deltacorr, _state, make_automatic); dst->errp2 = src->errp2; dst->errd2 = src->errd2; dst->errpinf = src->errpinf; dst->errdinf = src->errdinf; dst->repiterationscount = src->repiterationscount; dst->repncholesky = src->repncholesky; dst->factorizationtype = src->factorizationtype; dst->factorizationpoweredup = src->factorizationpoweredup; dst->factorizationpresent = src->factorizationpresent; ae_vector_init_copy(&dst->diagginvz, &src->diagginvz, _state, make_automatic); ae_vector_init_copy(&dst->diagzinvg, &src->diagzinvg, _state, make_automatic); ae_vector_init_copy(&dst->diagtinvs, &src->diagtinvs, _state, make_automatic); ae_vector_init_copy(&dst->diagsinvt, &src->diagsinvt, _state, make_automatic); ae_vector_init_copy(&dst->diagvinvw, &src->diagvinvw, _state, make_automatic); ae_vector_init_copy(&dst->diagpinvq, &src->diagpinvq, _state, make_automatic); ae_vector_init_copy(&dst->diagqinvp, &src->diagqinvp, _state, make_automatic); ae_vector_init_copy(&dst->diagd, &src->diagd, _state, make_automatic); ae_vector_init_copy(&dst->diage, &src->diage, _state, make_automatic); ae_matrix_init_copy(&dst->factdensehaug, &src->factdensehaug, _state, make_automatic); ae_vector_init_copy(&dst->factdh, &src->factdh, _state, make_automatic); ae_vector_init_copy(&dst->factdz, &src->factdz, _state, make_automatic); ae_vector_init_copy(&dst->factinvdz, &src->factinvdz, _state, make_automatic); ae_vector_init_copy(&dst->facteffectivee, &src->facteffectivee, _state, make_automatic); _sparsematrix_init_copy(&dst->factsparsekkttmpl, &src->factsparsekkttmpl, _state, make_automatic); _sparsematrix_init_copy(&dst->factsparsekkt, &src->factsparsekkt, _state, make_automatic); ae_vector_init_copy(&dst->factsparsekktpivp, &src->factsparsekktpivp, _state, make_automatic); ae_vector_init_copy(&dst->factsparsekktpivq, &src->factsparsekktpivq, _state, make_automatic); ae_vector_init_copy(&dst->rhsrho, &src->rhsrho, _state, make_automatic); ae_vector_init_copy(&dst->rhsnu, &src->rhsnu, _state, make_automatic); ae_vector_init_copy(&dst->rhstau, &src->rhstau, _state, make_automatic); ae_vector_init_copy(&dst->rhsalpha, &src->rhsalpha, _state, make_automatic); ae_vector_init_copy(&dst->rhssigma, &src->rhssigma, _state, make_automatic); ae_vector_init_copy(&dst->rhsbeta, &src->rhsbeta, _state, make_automatic); ae_vector_init_copy(&dst->rhsgammaz, &src->rhsgammaz, _state, make_automatic); ae_vector_init_copy(&dst->rhsgammas, &src->rhsgammas, _state, make_automatic); ae_vector_init_copy(&dst->rhsgammaw, &src->rhsgammaw, _state, make_automatic); ae_vector_init_copy(&dst->rhsgammaq, &src->rhsgammaq, _state, make_automatic); ae_vector_init_copy(&dst->rhsalphacap, &src->rhsalphacap, 
_state, make_automatic); ae_vector_init_copy(&dst->rhsbetacap, &src->rhsbetacap, _state, make_automatic); ae_vector_init_copy(&dst->rhsnucap, &src->rhsnucap, _state, make_automatic); ae_vector_init_copy(&dst->rhstaucap, &src->rhstaucap, _state, make_automatic); ae_vector_init_copy(&dst->deltaxy, &src->deltaxy, _state, make_automatic); ae_vector_init_copy(&dst->tmphx, &src->tmphx, _state, make_automatic); ae_vector_init_copy(&dst->tmpax, &src->tmpax, _state, make_automatic); ae_vector_init_copy(&dst->tmpaty, &src->tmpaty, _state, make_automatic); ae_vector_init_copy(&dst->dummyr, &src->dummyr, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_matrix_init_copy(&dst->tmpr2, &src->tmpr2, _state, make_automatic); ae_vector_init_copy(&dst->evalprogressg, &src->evalprogressg, _state, make_automatic); ae_vector_init_copy(&dst->tmpi, &src->tmpi, _state, make_automatic); _sparsematrix_init_copy(&dst->tmpsparse0, &src->tmpsparse0, _state, make_automatic); _sparsematrix_init_copy(&dst->tmpsparse1, &src->tmpsparse1, _state, make_automatic); } void _vipmstate_clear(void* _p) { vipmstate *p = (vipmstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->scl); ae_vector_clear(&p->invscl); ae_vector_clear(&p->xorigin); ae_vector_clear(&p->c); ae_matrix_clear(&p->denseh); _sparsematrix_clear(&p->sparseh); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->rawbndl); ae_vector_clear(&p->rawbndu); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_matrix_clear(&p->denseafull); ae_matrix_clear(&p->denseamain); _sparsematrix_clear(&p->sparseafull); _sparsematrix_clear(&p->sparseamain); _sparsematrix_clear(&p->combinedaslack); ae_vector_clear(&p->ascales); ae_vector_clear(&p->b); ae_vector_clear(&p->r); ae_vector_clear(&p->hasr); _vipmvars_clear(&p->current); _vipmvars_clear(&p->trial); _vipmvars_clear(&p->deltaaff); _vipmvars_clear(&p->deltacorr); ae_vector_clear(&p->diagginvz); ae_vector_clear(&p->diagzinvg); ae_vector_clear(&p->diagtinvs); ae_vector_clear(&p->diagsinvt); ae_vector_clear(&p->diagvinvw); ae_vector_clear(&p->diagpinvq); ae_vector_clear(&p->diagqinvp); ae_vector_clear(&p->diagd); ae_vector_clear(&p->diage); ae_matrix_clear(&p->factdensehaug); ae_vector_clear(&p->factdh); ae_vector_clear(&p->factdz); ae_vector_clear(&p->factinvdz); ae_vector_clear(&p->facteffectivee); _sparsematrix_clear(&p->factsparsekkttmpl); _sparsematrix_clear(&p->factsparsekkt); ae_vector_clear(&p->factsparsekktpivp); ae_vector_clear(&p->factsparsekktpivq); ae_vector_clear(&p->rhsrho); ae_vector_clear(&p->rhsnu); ae_vector_clear(&p->rhstau); ae_vector_clear(&p->rhsalpha); ae_vector_clear(&p->rhssigma); ae_vector_clear(&p->rhsbeta); ae_vector_clear(&p->rhsgammaz); ae_vector_clear(&p->rhsgammas); ae_vector_clear(&p->rhsgammaw); ae_vector_clear(&p->rhsgammaq); ae_vector_clear(&p->rhsalphacap); ae_vector_clear(&p->rhsbetacap); ae_vector_clear(&p->rhsnucap); ae_vector_clear(&p->rhstaucap); ae_vector_clear(&p->deltaxy); ae_vector_clear(&p->tmphx); ae_vector_clear(&p->tmpax); ae_vector_clear(&p->tmpaty); ae_vector_clear(&p->dummyr); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmp2); ae_matrix_clear(&p->tmpr2); ae_vector_clear(&p->evalprogressg); ae_vector_clear(&p->tmpi); _sparsematrix_clear(&p->tmpsparse0); _sparsematrix_clear(&p->tmpsparse1); } void _vipmstate_destroy(void* _p) { 
vipmstate *p = (vipmstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->scl); ae_vector_destroy(&p->invscl); ae_vector_destroy(&p->xorigin); ae_vector_destroy(&p->c); ae_matrix_destroy(&p->denseh); _sparsematrix_destroy(&p->sparseh); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->rawbndl); ae_vector_destroy(&p->rawbndu); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_matrix_destroy(&p->denseafull); ae_matrix_destroy(&p->denseamain); _sparsematrix_destroy(&p->sparseafull); _sparsematrix_destroy(&p->sparseamain); _sparsematrix_destroy(&p->combinedaslack); ae_vector_destroy(&p->ascales); ae_vector_destroy(&p->b); ae_vector_destroy(&p->r); ae_vector_destroy(&p->hasr); _vipmvars_destroy(&p->current); _vipmvars_destroy(&p->trial); _vipmvars_destroy(&p->deltaaff); _vipmvars_destroy(&p->deltacorr); ae_vector_destroy(&p->diagginvz); ae_vector_destroy(&p->diagzinvg); ae_vector_destroy(&p->diagtinvs); ae_vector_destroy(&p->diagsinvt); ae_vector_destroy(&p->diagvinvw); ae_vector_destroy(&p->diagpinvq); ae_vector_destroy(&p->diagqinvp); ae_vector_destroy(&p->diagd); ae_vector_destroy(&p->diage); ae_matrix_destroy(&p->factdensehaug); ae_vector_destroy(&p->factdh); ae_vector_destroy(&p->factdz); ae_vector_destroy(&p->factinvdz); ae_vector_destroy(&p->facteffectivee); _sparsematrix_destroy(&p->factsparsekkttmpl); _sparsematrix_destroy(&p->factsparsekkt); ae_vector_destroy(&p->factsparsekktpivp); ae_vector_destroy(&p->factsparsekktpivq); ae_vector_destroy(&p->rhsrho); ae_vector_destroy(&p->rhsnu); ae_vector_destroy(&p->rhstau); ae_vector_destroy(&p->rhsalpha); ae_vector_destroy(&p->rhssigma); ae_vector_destroy(&p->rhsbeta); ae_vector_destroy(&p->rhsgammaz); ae_vector_destroy(&p->rhsgammas); ae_vector_destroy(&p->rhsgammaw); ae_vector_destroy(&p->rhsgammaq); ae_vector_destroy(&p->rhsalphacap); ae_vector_destroy(&p->rhsbetacap); ae_vector_destroy(&p->rhsnucap); ae_vector_destroy(&p->rhstaucap); ae_vector_destroy(&p->deltaxy); ae_vector_destroy(&p->tmphx); ae_vector_destroy(&p->tmpax); ae_vector_destroy(&p->tmpaty); ae_vector_destroy(&p->dummyr); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmp2); ae_matrix_destroy(&p->tmpr2); ae_vector_destroy(&p->evalprogressg); ae_vector_destroy(&p->tmpi); _sparsematrix_destroy(&p->tmpsparse0); _sparsematrix_destroy(&p->tmpsparse1); } #endif #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD) void minsqpinitbuf(/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* x0, ae_int_t n, /* Real */ ae_matrix* cleic, /* Integer */ ae_vector* lcsrcidx, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, double epsx, ae_int_t maxits, minsqpstate* state, ae_state *_state) { ae_int_t nslack; ae_int_t i; ae_int_t j; double v; double vv; nslack = n+2*(nec+nlec)+(nic+nlic); state->n = n; state->nec = nec; state->nic = nic; state->nlec = nlec; state->nlic = nlic; /* * Prepare RCOMM state */ ae_vector_set_length(&state->rstate.ia, 9+1, _state); ae_vector_set_length(&state->rstate.ba, 1+1, _state); ae_vector_set_length(&state->rstate.ra, 7+1, _state); state->rstate.stage = -1; state->needfij = ae_false; state->xupdated = ae_false; ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->fi, 1+nlec+nlic, _state); ae_matrix_set_length(&state->j, 1+nlec+nlic, n, _state); /* * Allocate memory. 
*/ rvectorsetlengthatleast(&state->s, n, _state); rvectorsetlengthatleast(&state->step0x, n, _state); rvectorsetlengthatleast(&state->stepkx, n, _state); rvectorsetlengthatleast(&state->backupx, n, _state); rvectorsetlengthatleast(&state->step0fi, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->stepkfi, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->backupfi, 1+nlec+nlic, _state); rmatrixsetlengthatleast(&state->step0j, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&state->stepkj, 1+nlec+nlic, n, _state); rvectorsetlengthatleast(&state->fscales, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->meritlagmult, nec+nic+nlec+nlic, _state); rvectorsetlengthatleast(&state->dummylagmult, nec+nic+nlec+nlic, _state); bvectorsetlengthatleast(&state->hasbndl, n, _state); bvectorsetlengthatleast(&state->hasbndu, n, _state); rvectorsetlengthatleast(&state->scaledbndl, n, _state); rvectorsetlengthatleast(&state->scaledbndu, n, _state); rmatrixsetlengthatleast(&state->scaledcleic, nec+nic, n+1, _state); ivectorsetlengthatleast(&state->lcsrcidx, nec+nic, _state); /* * Prepare scaled problem */ for(i=0; i<=n-1; i++) { state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); if( state->hasbndl.ptr.p_bool[i] ) { state->scaledbndl.ptr.p_double[i] = bndl->ptr.p_double[i]/s->ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { state->scaledbndu.ptr.p_double[i] = bndu->ptr.p_double[i]/s->ptr.p_double[i]; } if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]), "SQP: integrity check failed, box constraints are inconsistent", _state); } state->step0x.ptr.p_double[i] = x0->ptr.p_double[i]/s->ptr.p_double[i]; state->s.ptr.p_double[i] = s->ptr.p_double[i]; } for(i=0; i<=nec+nic-1; i++) { /* * Permutation */ state->lcsrcidx.ptr.p_int[i] = lcsrcidx->ptr.p_int[i]; /* * Scale and normalize linear constraints */ vv = 0.0; for(j=0; j<=n-1; j++) { v = cleic->ptr.pp_double[i][j]*s->ptr.p_double[j]; state->scaledcleic.ptr.pp_double[i][j] = v; vv = vv+v*v; } vv = ae_sqrt(vv, _state); state->scaledcleic.ptr.pp_double[i][n] = cleic->ptr.pp_double[i][n]; if( ae_fp_greater(vv,(double)(0)) ) { for(j=0; j<=n; j++) { state->scaledcleic.ptr.pp_double[i][j] = state->scaledcleic.ptr.pp_double[i][j]/vv; } } } /* * Initial enforcement of box constraints */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state->step0x.ptr.p_double[i] = ae_maxreal(state->step0x.ptr.p_double[i], state->scaledbndl.ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { state->step0x.ptr.p_double[i] = ae_minreal(state->step0x.ptr.p_double[i], state->scaledbndu.ptr.p_double[i], _state); } } /* * Stopping criteria */ state->epsx = epsx; state->maxits = maxits; /* * Report fields */ state->repsimplexiterations = 0; state->repsimplexiterations1 = 0; state->repsimplexiterations2 = 0; state->repsimplexiterations3 = 0; state->repterminationtype = 0; state->repbcerr = (double)(0); state->repbcidx = -1; state->replcerr = (double)(0); state->replcidx = -1; state->repnlcerr = (double)(0); state->repnlcidx = -1; state->repiterationscount = 0; /* * Integrity checks */ ae_assert(ae_fp_less(nlcsqp_sqpdeltadecrease,nlcsqp_sqpdeltaincrease), "MinSQP: integrity check failed", _state); } /************************************************************************* This function performs actual processing for SQP algorithm. 
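
The routine is built around ALGLIB reverse communication: it returns True
while it still needs an action from the caller and False once the session
is finished. A caller-side driver loop looks roughly like the sketch below
(evaluate_fi_and_jacobian() and report_progress() are placeholder names for
user-side routines, not part of this unit):

    while( minsqpiteration(&state, &smonitor, userterm, _state) )
    {
        if( state.needfij )
        {
            evaluate_fi_and_jacobian(&state.x, &state.fi, &state.j);
            continue;
        }
        if( state.xupdated )
            report_progress(&state.x, state.f);
    }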
It expects that caller redirects its reverse communication requests NeedFiJ/XUpdated to external user who will provide analytic derivative (or handle reports about progress). In case external user does not have analytic derivative, it is responsibility of caller to intercept NeedFiJ request and replace it with appropriate numerical differentiation scheme. Results are stored: * point - in State.StepKX IMPORTANT: this function works with scaled problem formulation; it is responsibility of the caller to unscale request and scale Jacobian. NOTE: SMonitor is expected to be correctly initialized smoothness monitor. -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ ae_bool minsqpiteration(minsqpstate* state, smoothnessmonitor* smonitor, ae_bool userterminationneeded, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t i; ae_int_t j; double v; double vv; double mx; double gammamax; ae_int_t status; double deltamax; double multiplyby; double setscaleto; double prevtrustrad; ae_int_t subiterationidx; ae_bool dotrace; ae_bool dodetailedtrace; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; nslack = state->rstate.ia.ptr.p_int[1]; nec = state->rstate.ia.ptr.p_int[2]; nic = state->rstate.ia.ptr.p_int[3]; nlec = state->rstate.ia.ptr.p_int[4]; nlic = state->rstate.ia.ptr.p_int[5]; i = state->rstate.ia.ptr.p_int[6]; j = state->rstate.ia.ptr.p_int[7]; status = state->rstate.ia.ptr.p_int[8]; subiterationidx = state->rstate.ia.ptr.p_int[9]; dotrace = state->rstate.ba.ptr.p_bool[0]; dodetailedtrace = state->rstate.ba.ptr.p_bool[1]; v = state->rstate.ra.ptr.p_double[0]; vv = state->rstate.ra.ptr.p_double[1]; mx = state->rstate.ra.ptr.p_double[2]; gammamax = state->rstate.ra.ptr.p_double[3]; deltamax = state->rstate.ra.ptr.p_double[4]; multiplyby = state->rstate.ra.ptr.p_double[5]; setscaleto = state->rstate.ra.ptr.p_double[6]; prevtrustrad = state->rstate.ra.ptr.p_double[7]; } else { n = 359; nslack = -58; nec = -919; nic = -909; nlec = 81; nlic = 255; i = 74; j = -788; status = 809; subiterationidx = 205; dotrace = ae_false; dodetailedtrace = ae_true; v = -526; vv = 763; mx = -541; gammamax = -698; deltamax = -900; multiplyby = -318; setscaleto = -940; prevtrustrad = 1016; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } /* * Routine body */ n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); dotrace = ae_is_trace_enabled("SQP"); dodetailedtrace = dotrace&&ae_is_trace_enabled("SQP.DETAILED"); /* * Prepare rcomm interface */ state->needfij = ae_false; state->xupdated = ae_false; /* * Initialize algorithm data: * * Lagrangian and "Big C" estimates * * trust region * * initial function scales (vector of 1's) * * current approximation of the Hessian matrix H (unit matrix) * * initial linearized constraints * * initial violation of linear/nonlinear constraints */ state->fstagnationcnt = 0; state->trustrad = nlcsqp_inittrustrad; for(i=0; i<=nlec+nlic; i++) { state->fscales.ptr.p_double[i] = 1.0; 
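        
        /*
         * unit scale to start with; rescaled at the beginning of every outer
         * iteration if a row of the Jacobian becomes too large or too small
         */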
} gammamax = 0.0; state->haslagmult = ae_false; /* * Avoid spurious warnings about possibly uninitialized vars */ status = 0; /* * Evaluate function vector and Jacobian at Step0X, send first location report. * Compute initial violation of constraints. */ nlcsqp_sqpsendx(state, &state->step0x, _state); state->needfij = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; if( !nlcsqp_sqpretrievefij(state, &state->step0fi, &state->step0j, _state) ) { /* * Failed to retrieve function/Jaconian, infinities detected! */ for(i=0; i<=n-1; i++) { state->stepkx.ptr.p_double[i] = state->step0x.ptr.p_double[i]; } state->repterminationtype = -8; result = ae_false; return result; } nlcsqp_sqpcopystate(state, &state->step0x, &state->step0fi, &state->step0j, &state->stepkx, &state->stepkfi, &state->stepkj, _state); nlcsqp_sqpsendx(state, &state->stepkx, _state); state->f = state->stepkfi.ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->xupdated = ae_false; checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, &state->stepkx, n, &state->replcerr, &state->replcidx, _state); unscaleandchecknlcviolation(&state->stepkfi, &state->fscales, nlec, nlic, &state->repnlcerr, &state->repnlcidx, _state); /* * Trace output (if needed) */ if( dotrace ) { ae_trace("\n\n"); ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); ae_trace("// SQP SOLVER STARTED //\n"); ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); } /* * Perform outer (NLC) iterations */ nlcsqp_initqpsubsolver(state, &state->subsolver, _state); lbl_3: if( ae_false ) { goto lbl_4; } /* * Before beginning new outer iteration: * * renormalize target function and/or constraints, if some of them have too large magnitudes * * save initial point for the outer iteration */ for(i=0; i<=nlec+nlic; i++) { /* * Determine (a) multiplicative coefficient applied to function value * and Jacobian row, and (b) new value of the function scale. 
*/ mx = (double)(0); for(j=0; j<=n-1; j++) { mx = ae_maxreal(mx, ae_fabs(state->stepkj.ptr.pp_double[i][j], _state), _state); } multiplyby = 1.0; setscaleto = state->fscales.ptr.p_double[i]; if( ae_fp_greater_eq(mx,nlcsqp_sqpbigscale) ) { multiplyby = 1/mx; setscaleto = state->fscales.ptr.p_double[i]*mx; } if( ae_fp_less_eq(mx,nlcsqp_sqpsmallscale)&&ae_fp_greater(state->fscales.ptr.p_double[i],1.0) ) { if( ae_fp_greater(state->fscales.ptr.p_double[i]*mx,(double)(1)) ) { multiplyby = 1/mx; setscaleto = state->fscales.ptr.p_double[i]*mx; } else { multiplyby = state->fscales.ptr.p_double[i]; setscaleto = 1.0; } } if( ae_fp_neq(multiplyby,1.0) ) { /* * Function #I needs renormalization: * * update function vector element and Jacobian matrix row * * update FScales[] array */ state->stepkfi.ptr.p_double[i] = state->stepkfi.ptr.p_double[i]*multiplyby; for(j=0; j<=n-1; j++) { state->stepkj.ptr.pp_double[i][j] = state->stepkj.ptr.pp_double[i][j]*multiplyby; } state->fscales.ptr.p_double[i] = setscaleto; } } /* * Trace output (if needed) */ if( dotrace ) { ae_trace("\n=== OUTER ITERATION %5d STARTED ==================================================================\n", (int)(state->repiterationscount)); if( dodetailedtrace ) { ae_trace("> printing raw data (prior to applying variable and function scales)\n"); ae_trace("X (raw) = "); tracevectorunscaledunshiftedautoprec(&state->step0x, n, &state->s, ae_true, &state->s, ae_false, _state); ae_trace("\n"); ae_trace("> printing scaled data (after applying variable and function scales)\n"); ae_trace("X (scaled) = "); tracevectorautoprec(&state->step0x, 0, n, _state); ae_trace("\n"); ae_trace("FScales = "); tracevectorautoprec(&state->fscales, 0, 1+nlec+nlic, _state); ae_trace("\n"); ae_trace("Fi (scaled) = "); tracevectorautoprec(&state->stepkfi, 0, 1+nlec+nlic, _state); ae_trace("\n"); ae_trace("|Ji| (scaled) = "); tracerownrm1autoprec(&state->stepkj, 0, 1+nlec+nlic, 0, n, _state); ae_trace("\n"); } mx = (double)(0); for(i=1; i<=nlec; i++) { mx = ae_maxreal(mx, ae_fabs(state->stepkfi.ptr.p_double[i], _state), _state); } for(i=nlec+1; i<=nlec+nlic; i++) { mx = ae_maxreal(mx, state->stepkfi.ptr.p_double[i], _state); } ae_trace("trustRad = %0.3e\n", (double)(state->trustrad)); ae_trace("lin.violation = %0.3e (scaled violation of linear constraints)\n", (double)(state->replcerr)); ae_trace("nlc.violation = %0.3e (scaled violation of nonlinear constraints)\n", (double)(mx)); ae_trace("gammaMax = %0.3e\n", (double)(gammamax)); } /* * PHASE 2 * * This phase is a primary part of the algorithm which is responsible for its * convergence properties. * * It solves QP subproblem with possible activation and deactivation of constraints * and then starts backtracking (step length is bounded by 1.0) merit function search * (with second-order correction to deal with Maratos effect) on the direction produced * by QP subproblem. * * This phase is everything we need to in order to have convergence; however, * it has one performance-related issue: using "general" interior point QP solver * results in slow solution times. Fast equality-constrained phase is essential for * the convergence. 
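 *
 * Outline of one pass through this phase (see MeritPhaseIteration below):
 * solve the QP subproblem for a direction D and Lagrange multiplier
 * estimates, try the unit step along D; if the L1 merit function does not
 * decrease, re-linearize the nonlinear constraints at X+D, re-solve the QP
 * (second-order correction) and backtrack along the corrected direction.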
*/ nlcsqp_qpsubsolversetalgoipm(&state->subsolver, _state); nlcsqp_sqpcopystate(state, &state->stepkx, &state->stepkfi, &state->stepkj, &state->step0x, &state->step0fi, &state->step0j, _state); nlcsqp_meritphaseinit(&state->meritstate, &state->stepkx, &state->stepkfi, &state->stepkj, n, nec, nic, nlec, nlic, _state); lbl_5: if( !nlcsqp_meritphaseiteration(state, &state->meritstate, smonitor, userterminationneeded, _state) ) { goto lbl_6; } state->rstate.stage = 2; goto lbl_rcomm; lbl_2: goto lbl_5; lbl_6: nlcsqp_meritphaseresults(&state->meritstate, &state->stepkx, &state->stepkfi, &state->stepkj, &state->meritlagmult, &status, _state); if( status==0 ) { goto lbl_4; } ae_assert(status>0, "MinSQPIteration: integrity check failed", _state); state->haslagmult = ae_true; /* * Update trust region */ prevtrustrad = state->trustrad; deltamax = (double)(0); for(i=0; i<=n-1; i++) { deltamax = ae_maxreal(deltamax, ae_fabs(state->step0x.ptr.p_double[i]-state->stepkx.ptr.p_double[i], _state)/state->trustrad, _state); } if( ae_fp_less_eq(deltamax,nlcsqp_sqpdeltadecrease) ) { state->trustrad = state->trustrad*ae_maxreal(deltamax/nlcsqp_sqpdeltadecrease, nlcsqp_maxtrustraddecay, _state); } if( ae_fp_greater_eq(deltamax,nlcsqp_sqpdeltaincrease) ) { state->trustrad = state->trustrad*ae_minreal(deltamax/nlcsqp_sqpdeltaincrease, nlcsqp_maxtrustradgrowth, _state); } /* * Trace */ if( dotrace ) { ae_trace("\n--- outer iteration ends ---------------------------------------------------------------------------\n"); ae_trace("deltaMax = %0.3f (ratio of step length to trust radius)\n", (double)(deltamax)); ae_trace("newTrustRad = %0.3e", (double)(state->trustrad)); if( ae_fp_greater(state->trustrad,prevtrustrad) ) { ae_trace(", trust radius increased"); } if( ae_fp_less(state->trustrad,prevtrustrad) ) { ae_trace(", trust radius decreased"); } ae_trace("\n"); } /* * Advance outer iteration counter, test stopping criteria */ inc(&state->repiterationscount, _state); if( ae_fp_less_eq(ae_fabs(state->stepkfi.ptr.p_double[0]-state->step0fi.ptr.p_double[0], _state),nlcsqp_stagnationepsf*ae_fabs(state->step0fi.ptr.p_double[0], _state)) ) { inc(&state->fstagnationcnt, _state); } else { state->fstagnationcnt = 0; } if( ae_fp_less_eq(state->trustrad,state->epsx) ) { state->repterminationtype = 2; if( dotrace ) { ae_trace("> stopping condition met: trust radius is smaller than %0.3e\n", (double)(state->epsx)); } goto lbl_4; } if( state->maxits>0&&state->repiterationscount>=state->maxits ) { state->repterminationtype = 5; if( dotrace ) { ae_trace("> stopping condition met: %0d iterations performed\n", (int)(state->repiterationscount)); } goto lbl_4; } if( state->fstagnationcnt>=nlcsqp_fstagnationlimit ) { state->repterminationtype = 7; if( dotrace ) { ae_trace("> stopping criteria are too stringent: F stagnated for %0d its, stopping\n", (int)(state->fstagnationcnt)); } goto lbl_4; } goto lbl_3; lbl_4: smoothnessmonitortracestatus(smonitor, dotrace, _state); result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = nslack; state->rstate.ia.ptr.p_int[2] = nec; state->rstate.ia.ptr.p_int[3] = nic; state->rstate.ia.ptr.p_int[4] = nlec; state->rstate.ia.ptr.p_int[5] = nlic; state->rstate.ia.ptr.p_int[6] = i; state->rstate.ia.ptr.p_int[7] = j; state->rstate.ia.ptr.p_int[8] = status; state->rstate.ia.ptr.p_int[9] = subiterationidx; state->rstate.ba.ptr.p_bool[0] = dotrace; state->rstate.ba.ptr.p_bool[1] = dodetailedtrace; 
state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = vv; state->rstate.ra.ptr.p_double[2] = mx; state->rstate.ra.ptr.p_double[3] = gammamax; state->rstate.ra.ptr.p_double[4] = deltamax; state->rstate.ra.ptr.p_double[5] = multiplyby; state->rstate.ra.ptr.p_double[6] = setscaleto; state->rstate.ra.ptr.p_double[7] = prevtrustrad; return result; } /************************************************************************* This function initializes SQP subproblem. Should be called once in the beginning of the optimization. INPUT PARAMETERS: SState - solver state Subsolver - SQP subproblem to initialize RETURN VALUE: True on success False on failure of the QP solver (unexpected... but possible due to numerical errors) -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_initqpsubsolver(minsqpstate* sstate, minsqpsubsolver* subsolver, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t lccnt; ae_int_t nnz; ae_int_t offs; ae_int_t i; ae_int_t j; n = sstate->n; nec = sstate->nec; nic = sstate->nic; nlec = sstate->nlec; nlic = sstate->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); lccnt = nec+nic+nlec+nlic; /* * Allocate temporaries */ rvectorsetlengthatleast(&subsolver->cural, lccnt, _state); rvectorsetlengthatleast(&subsolver->curau, lccnt, _state); rvectorsetlengthatleast(&subsolver->curbndl, nslack, _state); rvectorsetlengthatleast(&subsolver->curbndu, nslack, _state); rvectorsetlengthatleast(&subsolver->curb, nslack, _state); rvectorsetlengthatleast(&subsolver->sk, n, _state); rvectorsetlengthatleast(&subsolver->yk, n, _state); /* * Initial state */ subsolver->algokind = 0; rmatrixsetlengthatleast(&subsolver->h, n, n, _state); for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { subsolver->h.ptr.pp_double[i][j] = (double)(0); } subsolver->h.ptr.pp_double[i][i] = (double)(1); } /* * Linear constraints do not change across subiterations, that's * why we allocate storage for them at the start of the program. * * A full set of "raw" constraints is stored; later we will filter * out inequality ones which are inactive anywhere in the current * trust region. * * NOTE: because sparserawlc object stores only linear constraint * (linearizations of nonlinear ones are not stored) we * allocate only minimum necessary space. 
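 *
 * Storage format is ALGLIB CRS: for row I the nonzero values are
 * Vals[RIdx[I]..RIdx[I+1]-1], with column indices in the matching Idx[]
 * range. As an illustration, a 2x3 matrix with rows {1,0,2} and {0,3,0}
 * is stored as Vals={1,2,3}, Idx={0,2,1}, RIdx={0,2,3}.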
*/ nnz = 0; for(i=0; i<=nec+nic-1; i++) { for(j=0; j<=n-1; j++) { if( sstate->scaledcleic.ptr.pp_double[i][j]!=0.0 ) { nnz = nnz+1; } } } ivectorsetlengthatleast(&subsolver->sparserawlc.ridx, nec+nic+1, _state); rvectorsetlengthatleast(&subsolver->sparserawlc.vals, nnz, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.idx, nnz, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.didx, nec+nic, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.uidx, nec+nic, _state); offs = 0; subsolver->sparserawlc.ridx.ptr.p_int[0] = 0; for(i=0; i<=nec+nic-1; i++) { for(j=0; j<=n-1; j++) { if( sstate->scaledcleic.ptr.pp_double[i][j]!=0.0 ) { /* * Primary part of the matrix */ subsolver->sparserawlc.vals.ptr.p_double[offs] = sstate->scaledcleic.ptr.pp_double[i][j]; subsolver->sparserawlc.idx.ptr.p_int[offs] = j; offs = offs+1; } } subsolver->sparserawlc.ridx.ptr.p_int[i+1] = offs; } subsolver->sparserawlc.matrixtype = 1; subsolver->sparserawlc.ninitialized = subsolver->sparserawlc.ridx.ptr.p_int[nec+nic]; subsolver->sparserawlc.m = nec+nic; subsolver->sparserawlc.n = n; sparseinitduidx(&subsolver->sparserawlc, _state); } /************************************************************************* This function sets subsolver algorithm to interior point method (IPM) -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_qpsubsolversetalgoipm(minsqpsubsolver* subsolver, ae_state *_state) { subsolver->algokind = 0; } /************************************************************************* This function sets subsolver algorithm to fast active set method -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_qpsubsolversetalgofastactiveset(minsqpsubsolver* subsolver, /* Real */ ae_vector* lagmult, ae_state *_state) { subsolver->algokind = 1; } /************************************************************************* Updates Hessian estimate, uses regularized formula which prevents Hessian eigenvalues from decreasing below sqrt(Eps) and rejects updates larger than 1/sqrt(Eps) in magnitude. 
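
In effect a BFGS-type update is applied (see the code for the exact details):

    B_new = B - (B*Sk)*(B*Sk)'/(Sk'*B*Sk) + c*(Yk*Yk')/(Yk'*Sk)

where Sk=X1-X0, Yk=(G1-G0)+Reg*Sk with Reg=100*sqrt(machine epsilon), and c
is a correction factor which keeps the new curvature estimate (Sk,Yk)/(Sk,Sk)
within a factor of 100 of the previous one (Sk,B*Sk)/(Sk,Sk).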
INPUT PARAMETERS: SState - solver state Subsolver - SQP subproblem to initialize X0, G0 - point #0 and gradient at #0, array[N] X1, G1 - point #1 and gradient at #1, array[N] -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcsqp_qpsubproblemupdatehessian(minsqpstate* sstate, minsqpsubsolver* subsolver, /* Real */ ae_vector* x0, /* Real */ ae_vector* g0, /* Real */ ae_vector* x1, /* Real */ ae_vector* g1, ae_state *_state) { ae_int_t i; ae_int_t n; double shs; double rawsy; double sy; double snrm2; double ynrm2; double v2; double gk; double sk; double yk; double mxs; double mxy; double mxhs; double reg; double big; double growth; double eigold; double eignew; double eigcorrection; ae_bool result; /* * Algorithm parameters */ reg = 100*ae_sqrt(ae_machineepsilon, _state); big = 1/reg; growth = 100.0; /* * Proceed */ result = ae_false; n = sstate->n; rvectorsetlengthatleast(&subsolver->tmp0, n, _state); rawsy = (double)(0); sy = (double)(0); snrm2 = (double)(0); ynrm2 = (double)(0); v2 = (double)(0); mxs = (double)(0); mxy = (double)(0); for(i=0; i<=n-1; i++) { /* * Fetch components */ sk = x1->ptr.p_double[i]-x0->ptr.p_double[i]; yk = g1->ptr.p_double[i]-g0->ptr.p_double[i]; gk = g0->ptr.p_double[i]; /* * Compute raw (S,Y) without regularization (to be used later * during comparison with zero) */ rawsy = rawsy+sk*yk; /* * Convexify Y */ yk = yk+reg*sk; /* * Compute various coefficients using regularized values */ sy = sy+sk*yk; snrm2 = snrm2+sk*sk; ynrm2 = ynrm2+yk*yk; v2 = v2+gk*gk; mxs = ae_maxreal(mxs, ae_fabs(sk, _state), _state); mxy = ae_maxreal(mxy, ae_fabs(yk, _state), _state); subsolver->sk.ptr.p_double[i] = sk; subsolver->yk.ptr.p_double[i] = yk; } shs = rmatrixsyvmv(n, &subsolver->h, 0, 0, ae_true, &subsolver->sk, 0, &subsolver->tmp0, _state); rmatrixgemv(n, n, 1.0, &subsolver->h, 0, 0, 0, &subsolver->sk, 0, 0.0, &subsolver->tmp0, 0, _state); mxhs = (double)(0); for(i=0; i<=n-1; i++) { mxhs = ae_maxreal(mxhs, ae_fabs(subsolver->tmp0.ptr.p_double[i], _state), _state); } /* * Skip updates if (Sk,Yk)<=0 or Sk*H*Sk<=0 * * NOTE: we use 0.5*(SY+RawSY) in place of (Sk,Yk) which allows us to have slight * nonconvexity due to numerical noise. */ if( ae_fp_less_eq(0.5*(sy+rawsy),(double)(0)) ) { return result; } if( ae_fp_less_eq(shs,(double)(0)) ) { return result; } if( ae_fp_less_eq(snrm2,(double)(0)) ) { return result; } ae_assert(ae_fp_greater(sy,(double)(0)), "UpdateHessian: integrity check failed", _state); /* * Skip updates with too short steps * * NOTE: may prevent us from updating Hessian near the solution */ if( ae_fp_less_eq(mxs,sstate->epsx) ) { return result; } /* * Too large Hessian updates sometimes may come from noisy or nonsmooth problems. * * Skip updates with max(Yk)^2/(Yk,Sk)>=BIG or max(H*Sk)^2/(Sk*H*Sk)>=BIG */ if( ae_fp_greater_eq(ae_sqr(mxy, _state)/sy,big) ) { return result; } if( ae_fp_greater_eq(ae_sqr(mxhs, _state)/shs,big) ) { return result; } /* * Compare eigenvalues of H: old one removed by update, and new one. * We require that new eigenvalue is not much larger/smaller than the old one. * In order to enforce this condition we compute correction coefficient and * multiply one of the rank-1 updates by this coefficient. 
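 *
 * Concretely: EigOld=(Sk,B*Sk)/(Sk,Sk), EigNew=(Sk,Yk)/(Sk,Sk) clipped to
 * the range [EigOld/Growth, EigOld*Growth] with Growth=100, and the
 * Yk*Yk'/(Sk,Yk) rank-1 term is scaled by EigNew/((Sk,Yk)/(Sk,Sk)).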
*/ eigold = shs/snrm2; eignew = sy/snrm2; eignew = ae_minreal(eignew, eigold*growth, _state); eignew = ae_maxreal(eignew, eigold/growth, _state); eigcorrection = eignew/(sy/snrm2); /* * Update Hessian */ rmatrixger(n, n, &subsolver->h, 0, 0, -1/shs, &subsolver->tmp0, 0, &subsolver->tmp0, 0, _state); rmatrixger(n, n, &subsolver->h, 0, 0, eigcorrection*(1/sy), &subsolver->yk, 0, &subsolver->yk, 0, _state); result = ae_true; return result; } /************************************************************************* This function solves QP subproblem given by initial point X, function vector Fi and Jacobian Jac, and returns estimates of Lagrangian multipliers and search direction D[]. -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_fassolve(minsqpsubsolver* subsolver, /* Real */ ae_vector* d0, /* Real */ ae_matrix* h, ae_int_t nq, /* Real */ ae_vector* b, ae_int_t n, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, sparsematrix* a, ae_int_t m, /* Real */ ae_vector* al, /* Real */ ae_vector* au, double trustrad, ae_int_t* terminationtype, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state) { ae_int_t i; *terminationtype = 1; /* * Initial point, integrity check for constraints */ bvectorsetlengthatleast(&subsolver->hasbndl, n, _state); bvectorsetlengthatleast(&subsolver->hasbndu, n, _state); for(i=0; i<=n-1; i++) { subsolver->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); subsolver->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); ae_assert(!subsolver->hasbndl.ptr.p_bool[i]||ae_fp_less_eq(bndl->ptr.p_double[i],d0->ptr.p_double[i]), "FASSolve: integrity check failed", _state); ae_assert(!subsolver->hasbndu.ptr.p_bool[i]||ae_fp_greater_eq(bndu->ptr.p_double[i],d0->ptr.p_double[i]), "FASSolve: integrity check failed", _state); d->ptr.p_double[i] = d0->ptr.p_double[i]; } bvectorsetlengthatleast(&subsolver->hasal, m, _state); bvectorsetlengthatleast(&subsolver->hasau, m, _state); for(i=0; i<=m-1; i++) { subsolver->hasal.ptr.p_bool[i] = ae_isfinite(al->ptr.p_double[i], _state); subsolver->hasau.ptr.p_bool[i] = ae_isfinite(au->ptr.p_double[i], _state); if( subsolver->hasal.ptr.p_bool[i]&&subsolver->hasau.ptr.p_bool[i] ) { ae_assert(ae_fp_less_eq(al->ptr.p_double[i],au->ptr.p_double[i]), "FASSolve: integrity check failed", _state); } } rmatrixsetlengthatleast(&subsolver->activea, n, n, _state); rvectorsetlengthatleast(&subsolver->activerhs, n, _state); ivectorsetlengthatleast(&subsolver->activeidx, n, _state); subsolver->activesetsize = 0; /* * Activate equality constraints (at most N) */ for(i=0; i<=m-1; i++) { if( (subsolver->hasal.ptr.p_bool[i]&&subsolver->hasau.ptr.p_bool[i])&&ae_fp_eq(al->ptr.p_double[i],au->ptr.p_double[i]) ) { /* * Stop if full set of constraints is activated */ if( subsolver->activesetsize>=n ) { break; } } } rvectorsetlengthatleast(&subsolver->tmp0, n, _state); rvectorsetlengthatleast(&subsolver->tmp1, n, _state); for(i=0; i<=n-1; i++) { subsolver->tmp0.ptr.p_double[i] = trustrad; subsolver->tmp1.ptr.p_double[i] = 0.0; } vipminitdensewithslacks(&subsolver->ipmsolver, &subsolver->tmp0, &subsolver->tmp1, nq, n, _state); vipmsetquadraticlinear(&subsolver->ipmsolver, h, &subsolver->sparsedummy, 0, ae_true, b, _state); vipmsetconstraints(&subsolver->ipmsolver, bndl, bndu, a, m, &subsolver->densedummy, 0, al, au, _state); vipmoptimize(&subsolver->ipmsolver, &subsolver->tmp0, &subsolver->tmp1, &subsolver->tmp2, 
terminationtype, _state); if( *terminationtype<=0 ) { return; } for(i=0; i<=n-1; i++) { d->ptr.p_double[i] = subsolver->tmp0.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { lagmult->ptr.p_double[i] = subsolver->tmp2.ptr.p_double[i]; } } /************************************************************************* This function solves QP subproblem given by initial point X, function vector Fi and Jacobian Jac, and returns estimates of Lagrangian multipliers and search direction D[]. -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcsqp_qpsubproblemsolve(minsqpstate* state, minsqpsubsolver* subsolver, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t i; ae_int_t j; ae_int_t k; double v; double vv; double vright; double vmax; ae_int_t lccnt; ae_int_t offsslackec; ae_int_t offsslacknlec; ae_int_t offsslackic; ae_int_t offsslacknlic; ae_int_t offs; ae_int_t nnz; ae_int_t j0; ae_int_t j1; ae_int_t terminationtype; ae_bool result; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); lccnt = nec+nic+nlec+nlic; /* * Locations of slack variables */ offsslackec = n; offsslacknlec = n+2*nec; offsslackic = n+2*nec+2*nlec; offsslacknlic = n+2*(nec+nlec)+nic; /* * Prepare temporary structures */ rvectorgrowto(&subsolver->cural, lccnt, _state); rvectorgrowto(&subsolver->curau, lccnt, _state); rvectorsetlengthatleast(&subsolver->d0, nslack, _state); /* * Prepare default solution: all zeros */ result = ae_true; for(i=0; i<=nslack-1; i++) { d->ptr.p_double[i] = 0.0; subsolver->d0.ptr.p_double[i] = (double)(0); } for(i=0; i<=lccnt-1; i++) { lagmult->ptr.p_double[i] = (double)(0); } /* * Linear term B * * NOTE: elements [N,NSlack) are equal to bigC + perturbation to improve numeric properties of QP problem */ for(i=0; i<=n-1; i++) { subsolver->curb.ptr.p_double[i] = jac->ptr.pp_double[0][i]; } v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(jac->ptr.pp_double[0][i], _state); } v = coalesce(ae_sqrt(v, _state), 1.0, _state); for(i=n; i<=nslack-1; i++) { subsolver->curb.ptr.p_double[i] = (nlcsqp_bigc+1.0/(1+i))*v; } /* * Trust radius constraints for primary variables */ for(i=0; i<=n-1; i++) { subsolver->curbndl.ptr.p_double[i] = -state->trustrad; subsolver->curbndu.ptr.p_double[i] = state->trustrad; if( state->hasbndl.ptr.p_bool[i] ) { subsolver->curbndl.ptr.p_double[i] = ae_maxreal(subsolver->curbndl.ptr.p_double[i], state->scaledbndl.ptr.p_double[i]-x->ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { subsolver->curbndu.ptr.p_double[i] = ae_minreal(subsolver->curbndu.ptr.p_double[i], state->scaledbndu.ptr.p_double[i]-x->ptr.p_double[i], _state); } } /* * Prepare storage for "effective" constraining matrix */ nnz = subsolver->sparserawlc.ridx.ptr.p_int[nec+nic]; for(i=0; i<=nlec+nlic-1; i++) { for(j=0; j<=n-1; j++) { if( jac->ptr.pp_double[1+i][j]!=0.0 ) { nnz = nnz+1; } } } nnz = nnz+2*nec+nic; nnz = nnz+2*nlec+nlic; ivectorgrowto(&subsolver->sparseefflc.ridx, lccnt+1, _state); rvectorgrowto(&subsolver->sparseefflc.vals, nnz, _state); ivectorgrowto(&subsolver->sparseefflc.idx, nnz, _state); ivectorsetlengthatleast(&subsolver->sparseefflc.didx, lccnt, _state); ivectorsetlengthatleast(&subsolver->sparseefflc.uidx, lccnt, _state); 
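    
    /*
     * Layout of the NSlack-dimensional QP variable vector implied by the
     * offsets above: [0,N) is the step D in the primal variables,
     * [OffsSlackEC,OffsSlackEC+2*NEC) are slacks of linear equalities,
     * [OffsSlackNLEC,OffsSlackNLEC+2*NLEC) slacks of nonlinear equalities,
     * [OffsSlackIC,OffsSlackIC+NIC) slacks of linear inequalities,
     * [OffsSlackNLIC,OffsSlackNLIC+NLIC) slacks of nonlinear inequalities.
     */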
    subsolver->sparseefflc.m = 0;
    subsolver->sparseefflc.n = nslack;
    subsolver->sparseefflc.matrixtype = 1;
    
    /*
     * Append linear equality/inequality constraints
     *
     * Scan sparsified linear constraints stored in sparserawlc[], skip ones
     * which are inactive anywhere in the trust region.
     */
    rvectorsetlengthatleast(&subsolver->tmp0, nslack, _state);
    for(i=0; i<=n-1; i++)
    {
        subsolver->tmp0.ptr.p_double[i] = x->ptr.p_double[i];
    }
    for(i=n; i<=nslack-1; i++)
    {
        subsolver->tmp0.ptr.p_double[i] = (double)(0);
    }
    for(i=0; i<=nec+nic-1; i++)
    {
        
        /*
         * Calculate:
         * * VRight - product of X[] (extended with zeros up to NSlack elements)
         *   and AR[i] - Ith row of sparserawlc matrix.
         * * VMax - maximum value of X*ARi computed over trust region
         */
        vright = (double)(0);
        vmax = (double)(0);
        j0 = subsolver->sparserawlc.ridx.ptr.p_int[i];
        j1 = subsolver->sparserawlc.ridx.ptr.p_int[i+1]-1;
        for(k=j0; k<=j1; k++)
        {
            j = subsolver->sparserawlc.idx.ptr.p_int[k];
            v = subsolver->tmp0.ptr.p_double[j];
            vv = subsolver->sparserawlc.vals.ptr.p_double[k];
            vright = vright+vv*v;
            if( vv>=0 )
            {
                vmax = vmax+vv*(v+subsolver->curbndu.ptr.p_double[j]);
            }
            else
            {
                vmax = vmax+vv*(v+subsolver->curbndl.ptr.p_double[j]);
            }
        }
        
        /*
         * If constraint is an inequality one and guaranteed to be inactive
         * within trust region, it is skipped (row itself is retained but
         * filled by zeros).
         */
        if( i>=nec&&vmax<=state->scaledcleic.ptr.pp_double[i][n] )
        {
            offs = subsolver->sparseefflc.ridx.ptr.p_int[i];
            subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1);
            subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslackic+(i-nec);
            subsolver->sparseefflc.ridx.ptr.p_int[i+1] = offs+1;
            subsolver->cural.ptr.p_double[i] = 0.0;
            subsolver->curau.ptr.p_double[i] = 0.0;
            subsolver->curbndl.ptr.p_double[offsslackic+(i-nec)] = (double)(0);
            subsolver->curbndu.ptr.p_double[offsslackic+(i-nec)] = (double)(0);
            continue;
        }
        
        /*
         * Start working on row I
         */
        offs = subsolver->sparseefflc.ridx.ptr.p_int[i];
        
        /*
         * Copy constraint from sparserawlc[] to sparseefflc[]
         */
        j0 = subsolver->sparserawlc.ridx.ptr.p_int[i];
        j1 = subsolver->sparserawlc.ridx.ptr.p_int[i+1]-1;
        for(k=j0; k<=j1; k++)
        {
            subsolver->sparseefflc.idx.ptr.p_int[offs] = subsolver->sparserawlc.idx.ptr.p_int[k];
            subsolver->sparseefflc.vals.ptr.p_double[offs] = subsolver->sparserawlc.vals.ptr.p_double[k];
            offs = offs+1;
        }
        
        /*
         * Set up slack variables
         */
        if( i<nec )
        {
            subsolver->sparseefflc.vals.ptr.p_double[offs+0] = (double)(-1);
            subsolver->sparseefflc.vals.ptr.p_double[offs+1] = (double)(1);
            subsolver->sparseefflc.idx.ptr.p_int[offs+0] = offsslackec+2*i+0;
            subsolver->sparseefflc.idx.ptr.p_int[offs+1] = offsslackec+2*i+1;
            offs = offs+2;
        }
        else
        {
            
            /*
             * Slack variables for inequality constraints
             */
            subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1);
            subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslackic+(i-nec);
            offs = offs+1;
        }
        
        /*
         * Finalize row
         */
        subsolver->sparseefflc.ridx.ptr.p_int[i+1] = offs;
        
        /*
         * Set up bounds and slack part of D0.
         *
         * NOTE: bounds for equality and inequality constraints are
         *       handled differently
         */
        v = vright-state->scaledcleic.ptr.pp_double[i][n];
        if( i<nec )
        {
            subsolver->cural.ptr.p_double[i] = -v;
            subsolver->curau.ptr.p_double[i] = -v;
            subsolver->curbndl.ptr.p_double[offsslackec+2*i+0] = (double)(0);
            subsolver->curbndl.ptr.p_double[offsslackec+2*i+1] = (double)(0);
            subsolver->curbndu.ptr.p_double[offsslackec+2*i+0] = ae_fabs(v, _state);
            subsolver->curbndu.ptr.p_double[offsslackec+2*i+1] = ae_fabs(v, _state);
            if( ae_fp_greater_eq(v,(double)(0)) )
            {
                subsolver->d0.ptr.p_double[offsslackec+2*i+0] = ae_fabs(v, _state);
                subsolver->d0.ptr.p_double[offsslackec+2*i+1] = (double)(0);
            }
            else
            {
                subsolver->d0.ptr.p_double[offsslackec+2*i+0] = (double)(0);
                subsolver->d0.ptr.p_double[offsslackec+2*i+1] = ae_fabs(v, _state);
            }
        }
        else
        {
            subsolver->cural.ptr.p_double[i] = _state->v_neginf;
            subsolver->curau.ptr.p_double[i] = -v;
            subsolver->curbndl.ptr.p_double[offsslackic+(i-nec)] = (double)(0);
            subsolver->curbndu.ptr.p_double[offsslackic+(i-nec)] = ae_maxreal(v, (double)(0), _state);
            subsolver->d0.ptr.p_double[offsslackic+(i-nec)] = ae_maxreal(v, (double)(0), _state);
        }
    }
    subsolver->sparseefflc.m = subsolver->sparseefflc.m+(nec+nic);
    
    /*
     * Append nonlinear equality/inequality constraints
     */
    for(i=0; i<=nlec+nlic-1; i++)
    {
        
        /*
         * Calculate scale coefficient
         */
        vv = (double)(0);
        for(j=0; j<=n-1; j++)
        {
            v = jac->ptr.pp_double[1+i][j];
            vv = vv+v*v;
        }
        vv = 1/coalesce(ae_sqrt(vv, _state), (double)(1), _state);
        
        /*
         * Copy scaled row
         */
        offs = subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m+i];
        for(j=0; j<=n-1; j++)
        {
            if( jac->ptr.pp_double[1+i][j]!=0.0 )
            {
                subsolver->sparseefflc.vals.ptr.p_double[offs] = vv*jac->ptr.pp_double[1+i][j];
                subsolver->sparseefflc.idx.ptr.p_int[offs] = j;
                offs = offs+1;
            }
        }
        if( i<nlec )
        {
            subsolver->sparseefflc.vals.ptr.p_double[offs+0] = (double)(-1);
            subsolver->sparseefflc.vals.ptr.p_double[offs+1] = (double)(1);
            subsolver->sparseefflc.idx.ptr.p_int[offs+0] = offsslacknlec+2*i+0;
            subsolver->sparseefflc.idx.ptr.p_int[offs+1] = offsslacknlec+2*i+1;
            offs = offs+2;
        }
        else
        {
            
            /*
             * Add slack terms for inequality constraints
             */
            subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1);
            subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslacknlic+(i-nlec);
            offs = offs+1;
        }
        subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m+i+1] = offs;
        
        /*
         * Set box constraints on slack variables and bounds on linear equality/inequality constraints
         */
        v = vv*fi->ptr.p_double[1+i];
        if( i<nlec )
        {
            subsolver->cural.ptr.p_double[subsolver->sparseefflc.m+i] = -v;
            subsolver->curau.ptr.p_double[subsolver->sparseefflc.m+i] = -v;
            subsolver->curbndl.ptr.p_double[offsslacknlec+2*i+0] = (double)(0);
            subsolver->curbndl.ptr.p_double[offsslacknlec+2*i+1] = (double)(0);
            subsolver->curbndu.ptr.p_double[offsslacknlec+2*i+0] = ae_fabs(v, _state);
            subsolver->curbndu.ptr.p_double[offsslacknlec+2*i+1] = ae_fabs(v, _state);
            if( ae_fp_greater_eq(v,(double)(0)) )
            {
                subsolver->d0.ptr.p_double[offsslacknlec+2*i+0] = ae_fabs(v, _state);
                subsolver->d0.ptr.p_double[offsslacknlec+2*i+1] = (double)(0);
            }
            else
            {
                subsolver->d0.ptr.p_double[offsslacknlec+2*i+0] = (double)(0);
                subsolver->d0.ptr.p_double[offsslacknlec+2*i+1] = ae_fabs(v, _state);
            }
        }
        else
        {
            
            /*
             * Inequality constraint
             */
            subsolver->cural.ptr.p_double[subsolver->sparseefflc.m+i] = _state->v_neginf;
            subsolver->curau.ptr.p_double[subsolver->sparseefflc.m+i] = -v;
            subsolver->curbndl.ptr.p_double[offsslacknlic+(i-nlec)] = (double)(0);
            subsolver->curbndu.ptr.p_double[offsslacknlic+(i-nlec)] = ae_maxreal(v, (double)(0), _state);
subsolver->d0.ptr.p_double[offsslacknlic+(i-nlec)] = ae_maxreal(v, (double)(0), _state); } } subsolver->sparseefflc.m = subsolver->sparseefflc.m+(nlec+nlic); /* * Finalize sparse matrix structure */ ae_assert(subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]<=subsolver->sparseefflc.idx.cnt, "QPSubproblemSolve: critical integrity check failed", _state); ae_assert(subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]<=subsolver->sparseefflc.vals.cnt, "QPSubproblemSolve: critical integrity check failed", _state); subsolver->sparseefflc.ninitialized = subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]; sparseinitduidx(&subsolver->sparseefflc, _state); /* * Solve quadratic program */ if( subsolver->algokind==0 ) { /* * Use dense IPM */ rvectorsetlengthatleast(&subsolver->tmp0, nslack, _state); rvectorsetlengthatleast(&subsolver->tmp1, nslack, _state); for(i=0; i<=nslack-1; i++) { subsolver->tmp0.ptr.p_double[i] = state->trustrad; subsolver->tmp1.ptr.p_double[i] = 0.0; } vipminitdensewithslacks(&subsolver->ipmsolver, &subsolver->tmp0, &subsolver->tmp1, n, nslack, _state); vipmsetquadraticlinear(&subsolver->ipmsolver, &subsolver->h, &subsolver->sparsedummy, 0, ae_true, &subsolver->curb, _state); vipmsetconstraints(&subsolver->ipmsolver, &subsolver->curbndl, &subsolver->curbndu, &subsolver->sparseefflc, subsolver->sparseefflc.m, &subsolver->densedummy, 0, &subsolver->cural, &subsolver->curau, _state); vipmoptimize(&subsolver->ipmsolver, &subsolver->tmp0, &subsolver->tmp1, &subsolver->tmp2, &terminationtype, _state); if( terminationtype<=0 ) { /* * QP solver failed due to numerical errors; exit */ result = ae_false; return result; } for(i=0; i<=nslack-1; i++) { d->ptr.p_double[i] = subsolver->tmp0.ptr.p_double[i]; } for(i=0; i<=lccnt-1; i++) { lagmult->ptr.p_double[i] = subsolver->tmp2.ptr.p_double[i]; } return result; } if( subsolver->algokind==1 ) { /* * Use fast active set */ nlcsqp_fassolve(subsolver, &subsolver->d0, &subsolver->h, n, &subsolver->curb, nslack, &subsolver->curbndl, &subsolver->curbndu, &subsolver->sparseefflc, subsolver->sparseefflc.m, &subsolver->cural, &subsolver->curau, state->trustrad, &terminationtype, d, lagmult, _state); if( terminationtype<=0 ) { /* * QP solver failed due to numerical errors; exit */ result = ae_false; return result; } return result; } /* * Unexpected */ ae_assert(ae_false, "SQP: unexpected subsolver type", _state); return result; } /************************************************************************* This function initializes MeritPhase temporaries. It should be called before beginning of each new iteration. You may call it multiple times for the same instance of MeritPhase temporaries. INPUT PARAMETERS: MeritState - instance to be initialized. 
N - problem dimensionality NEC, NIC - linear equality/inequality constraint count NLEC, NLIC - nonlinear equality/inequality constraint count OUTPUT PARAMETERS: MeritState - instance being initialized -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_meritphaseinit(minsqpmeritphasestate* meritstate, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t nslack; nslack = n+2*(nec+nlec)+(nic+nlic); meritstate->n = n; meritstate->nec = nec; meritstate->nic = nic; meritstate->nlec = nlec; meritstate->nlic = nlic; rvectorsetlengthatleast(&meritstate->d, nslack, _state); rvectorsetlengthatleast(&meritstate->dx, nslack, _state); rvectorsetlengthatleast(&meritstate->stepkx, n, _state); rvectorsetlengthatleast(&meritstate->stepkxc, n, _state); rvectorsetlengthatleast(&meritstate->stepkxn, n, _state); rvectorsetlengthatleast(&meritstate->stepkfi, 1+nlec+nlic, _state); rvectorsetlengthatleast(&meritstate->stepkfic, 1+nlec+nlic, _state); rvectorsetlengthatleast(&meritstate->stepkfin, 1+nlec+nlic, _state); rmatrixsetlengthatleast(&meritstate->stepkj, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&meritstate->stepkjc, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&meritstate->stepkjn, 1+nlec+nlic, n, _state); rvectorsetlengthatleast(&meritstate->stepklaggrad, n, _state); rvectorsetlengthatleast(&meritstate->stepknlaggrad, n, _state); rvectorsetlengthatleast(&meritstate->lagmult, nec+nic+nlec+nlic, _state); rvectorsetlengthatleast(&meritstate->dummylagmult, nec+nic+nlec+nlic, _state); for(i=0; i<=n-1; i++) { meritstate->stepkx.ptr.p_double[i] = curx->ptr.p_double[i]; } for(i=0; i<=nlec+nlic; i++) { meritstate->stepkfi.ptr.p_double[i] = curfi->ptr.p_double[i]; for(j=0; j<=n-1; j++) { meritstate->stepkj.ptr.pp_double[i][j] = curj->ptr.pp_double[i][j]; } } ae_vector_set_length(&meritstate->rmeritphasestate.ia, 7+1, _state); ae_vector_set_length(&meritstate->rmeritphasestate.ba, 3+1, _state); ae_vector_set_length(&meritstate->rmeritphasestate.ra, 8+1, _state); meritstate->rmeritphasestate.stage = -1; } /************************************************************************* This function tries to perform either phase #1 or phase #3 step. Former corresponds to linear model step (without conjugacy constraints) with correction for nonlinearity ("second order correction"). Such correction helps to overcome Maratos effect (a tendency of L1 penalized merit functions to reject nonzero steps). Latter is a step using linear model with no second order correction. INPUT PARAMETERS: State - SQP solver state SMonitor - smoothness monitor UserTerminationNeeded-True if user requested termination LagMult - array[NEC+NIC+NLEC+NLIC], contents ignored on input. OUTPUT PARAMETERS: State - RepTerminationType is set to current termination code (if Status=0). 
LagMult - filled with current Lagrange multipliers Status - when reverse communication is done, Status is set to: * positive value, if we can proceed to the next stage of the outer iteration * zero, if algorithm is terminated (RepTerminationType is set to appropriate value) -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcsqp_meritphaseiteration(minsqpstate* state, minsqpmeritphasestate* meritstate, smoothnessmonitor* smonitor, ae_bool userterminationneeded, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t i; ae_int_t j; double v; double mx; double f0; double f1; double nu; double localstp; double stepklagval; double stepknlagval; ae_bool hessianupdateperformed; ae_bool dotrace; ae_bool doprobing; ae_bool dotracexd; double stp; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( meritstate->rmeritphasestate.stage>=0 ) { n = meritstate->rmeritphasestate.ia.ptr.p_int[0]; nslack = meritstate->rmeritphasestate.ia.ptr.p_int[1]; nec = meritstate->rmeritphasestate.ia.ptr.p_int[2]; nic = meritstate->rmeritphasestate.ia.ptr.p_int[3]; nlec = meritstate->rmeritphasestate.ia.ptr.p_int[4]; nlic = meritstate->rmeritphasestate.ia.ptr.p_int[5]; i = meritstate->rmeritphasestate.ia.ptr.p_int[6]; j = meritstate->rmeritphasestate.ia.ptr.p_int[7]; hessianupdateperformed = meritstate->rmeritphasestate.ba.ptr.p_bool[0]; dotrace = meritstate->rmeritphasestate.ba.ptr.p_bool[1]; doprobing = meritstate->rmeritphasestate.ba.ptr.p_bool[2]; dotracexd = meritstate->rmeritphasestate.ba.ptr.p_bool[3]; v = meritstate->rmeritphasestate.ra.ptr.p_double[0]; mx = meritstate->rmeritphasestate.ra.ptr.p_double[1]; f0 = meritstate->rmeritphasestate.ra.ptr.p_double[2]; f1 = meritstate->rmeritphasestate.ra.ptr.p_double[3]; nu = meritstate->rmeritphasestate.ra.ptr.p_double[4]; localstp = meritstate->rmeritphasestate.ra.ptr.p_double[5]; stepklagval = meritstate->rmeritphasestate.ra.ptr.p_double[6]; stepknlagval = meritstate->rmeritphasestate.ra.ptr.p_double[7]; stp = meritstate->rmeritphasestate.ra.ptr.p_double[8]; } else { n = -229; nslack = -536; nec = 487; nic = -115; nlec = 886; nlic = 346; i = -722; j = -413; hessianupdateperformed = ae_true; dotrace = ae_true; doprobing = ae_true; dotracexd = ae_false; v = -154; mx = 306; f0 = -1011; f1 = 951; nu = -463; localstp = 88; stepklagval = -861; stepknlagval = -678; stp = -731; } if( meritstate->rmeritphasestate.stage==0 ) { goto lbl_0; } if( meritstate->rmeritphasestate.stage==1 ) { goto lbl_1; } if( meritstate->rmeritphasestate.stage==2 ) { goto lbl_2; } if( meritstate->rmeritphasestate.stage==3 ) { goto lbl_3; } /* * Routine body */ n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); dotrace = ae_is_trace_enabled("SQP"); dotracexd = dotrace&&ae_is_trace_enabled("SQP.DETAILED"); doprobing = ae_is_trace_enabled("SQP.PROBING"); ae_assert(meritstate->lagmult.cnt>=nec+nic+nlec+nlic, "MeritPhaseIteration: integrity check failed", _state); /* * Report iteration beginning */ if( dotrace ) { ae_trace("\n--- quadratic step with automatic second-order correction 
------------------------------------------\n"); } /* * Default decision is to continue algorithm */ meritstate->status = 1; stp = (double)(0); /* * Determine step direction using linearized model with no conjugacy terms */ if( !nlcsqp_qpsubproblemsolve(state, &state->subsolver, &meritstate->stepkx, &meritstate->stepkfi, &meritstate->stepkj, &meritstate->d, &meritstate->lagmult, _state) ) { if( dotrace ) { ae_trace("> [WARNING] QP subproblem failed\n"); } result = ae_false; return result; } /* * Perform merit function line search. * * First, we try unit step. If it does not decrease merit function, * a second-order correction is tried (helps to combat Maratos effect). */ localstp = 1.0; f0 = nlcsqp_meritfunction(state, &meritstate->stepkx, &meritstate->stepkfi, &meritstate->lagmult, &meritstate->tmpmerit, _state); for(i=0; i<=n-1; i++) { meritstate->stepkxn.ptr.p_double[i] = meritstate->stepkx.ptr.p_double[i]+localstp*meritstate->d.ptr.p_double[i]; } nlcsqp_sqpsendx(state, &meritstate->stepkxn, _state); state->needfij = ae_true; meritstate->rmeritphasestate.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; if( !nlcsqp_sqpretrievefij(state, &meritstate->stepkfin, &meritstate->stepkjn, _state) ) { /* * Failed to retrieve func/Jac, infinities detected */ state->repterminationtype = -8; meritstate->status = 0; if( dotrace ) { ae_trace("[ERROR] infinities in target/constraints are detected\n"); } result = ae_false; return result; } smoothnessmonitorstartlinesearch(smonitor, &meritstate->stepkx, &meritstate->stepkfi, &meritstate->stepkj, _state); smoothnessmonitorenqueuepoint(smonitor, &meritstate->d, localstp, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->stepkjn, _state); f1 = nlcsqp_meritfunction(state, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->lagmult, &meritstate->tmpmerit, _state); if( ae_fp_less(f1,f0) ) { goto lbl_4; } /* * Full step increases merit function. Let's compute second order * correction to the constraint model and recompute trial step D: * * use original linearization of the target * * extrapolate linearization of nonlinear constraints at StepKX+D back to origin * */ meritstate->stepkfic.ptr.p_double[0] = meritstate->stepkfi.ptr.p_double[0]; for(j=0; j<=n-1; j++) { meritstate->stepkjc.ptr.pp_double[0][j] = meritstate->stepkj.ptr.pp_double[0][j]; } for(i=1; i<=nlec+nlic; i++) { v = (double)(0); for(j=0; j<=n-1; j++) { v = v+meritstate->d.ptr.p_double[j]*meritstate->stepkj.ptr.pp_double[i][j]; meritstate->stepkjc.ptr.pp_double[i][j] = meritstate->stepkj.ptr.pp_double[i][j]; } meritstate->stepkfic.ptr.p_double[i] = meritstate->stepkfin.ptr.p_double[i]-v; } if( !nlcsqp_qpsubproblemsolve(state, &state->subsolver, &meritstate->stepkx, &meritstate->stepkfic, &meritstate->stepkjc, &meritstate->dx, &meritstate->dummylagmult, _state) ) { if( dotrace ) { ae_trace("> [WARNING] second-order QP subproblem failed\n"); } result = ae_false; return result; } for(i=0; i<=n-1; i++) { meritstate->d.ptr.p_double[i] = meritstate->dx.ptr.p_double[i]; } /* * Perform line search, we again try full step (maybe it will work after SOC) * * NOTE: we use MeritLagMult - Lagrange multipliers computed for initial, * uncorrected task - for the merit function model. * Using DummyLagMult can destabilize algorithm. 
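 *
 * Backtracking scheme used below: start from the unit step and multiply it
 * by Nu after every failed trial, with Nu itself decaying from 0.5 towards
 * a floor of 0.1; the search is abandoned (a zero step is accepted) once
 * the step length drops below 0.001.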
*/ localstp = 1.0; nu = 0.5; f1 = f0; lbl_6: if( ae_false ) { goto lbl_7; } for(i=0; i<=n-1; i++) { meritstate->stepkxn.ptr.p_double[i] = meritstate->stepkx.ptr.p_double[i]+localstp*meritstate->d.ptr.p_double[i]; } nlcsqp_sqpsendx(state, &meritstate->stepkxn, _state); state->needfij = ae_true; meritstate->rmeritphasestate.stage = 1; goto lbl_rcomm; lbl_1: state->needfij = ae_false; if( !nlcsqp_sqpretrievefij(state, &meritstate->stepkfin, &meritstate->stepkjn, _state) ) { /* * Failed to retrieve func/Jac, infinities detected */ state->repterminationtype = -8; meritstate->status = 0; if( dotrace ) { ae_trace("[ERROR] infinities in target/constraints are detected\n"); } result = ae_false; return result; } smoothnessmonitorenqueuepoint(smonitor, &meritstate->d, localstp, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->stepkjn, _state); f1 = nlcsqp_meritfunction(state, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->lagmult, &meritstate->tmpmerit, _state); if( ae_fp_less(f1,f0) ) { /* * Step is found! */ goto lbl_7; } if( ae_fp_less(localstp,0.001) ) { /* * Step is shorter than 0.001 times current search direction, * it means that no good step can be found. */ localstp = (double)(0); nlcsqp_sqpcopystate(state, &meritstate->stepkx, &meritstate->stepkfi, &meritstate->stepkj, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->stepkjn, _state); goto lbl_7; } localstp = nu*localstp; nu = ae_maxreal(0.1, 0.5*nu, _state); goto lbl_6; lbl_7: lbl_4: smoothnessmonitorfinalizelinesearch(smonitor, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { meritstate->stepkxn.ptr.p_double[i] = ae_maxreal(meritstate->stepkxn.ptr.p_double[i], state->scaledbndl.ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { meritstate->stepkxn.ptr.p_double[i] = ae_minreal(meritstate->stepkxn.ptr.p_double[i], state->scaledbndu.ptr.p_double[i], _state); } } if( userterminationneeded ) { /* * User requested termination, break before we move to new point */ state->repterminationtype = 8; meritstate->status = 0; if( dotrace ) { ae_trace("> user requested termination\n"); } result = ae_false; return result; } nlcsqp_lagrangianfg(state, &meritstate->stepkx, state->trustrad, &meritstate->stepkfi, &meritstate->stepkj, &meritstate->lagmult, &meritstate->tmplagrangianfg, &stepklagval, &meritstate->stepklaggrad, _state); nlcsqp_lagrangianfg(state, &meritstate->stepkxn, state->trustrad, &meritstate->stepkfin, &meritstate->stepkjn, &meritstate->lagmult, &meritstate->tmplagrangianfg, &stepknlagval, &meritstate->stepknlaggrad, _state); /* * Trace */ if( !dotrace ) { goto lbl_8; } if( !doprobing ) { goto lbl_10; } smoothnessmonitorstartprobing(smonitor, 1.0, 2, state->trustrad, _state); lbl_12: if( !smoothnessmonitorprobe(smonitor, _state) ) { goto lbl_13; } for(j=0; j<=n-1; j++) { meritstate->stepkxc.ptr.p_double[j] = meritstate->stepkx.ptr.p_double[j]+smonitor->probingstp*meritstate->d.ptr.p_double[j]; if( state->hasbndl.ptr.p_bool[j] ) { meritstate->stepkxc.ptr.p_double[j] = ae_maxreal(meritstate->stepkxc.ptr.p_double[j], state->scaledbndl.ptr.p_double[j], _state); } if( state->hasbndu.ptr.p_bool[j] ) { meritstate->stepkxc.ptr.p_double[j] = ae_minreal(meritstate->stepkxc.ptr.p_double[j], state->scaledbndu.ptr.p_double[j], _state); } } nlcsqp_sqpsendx(state, &meritstate->stepkxc, _state); state->needfij = ae_true; meritstate->rmeritphasestate.stage = 2; goto lbl_rcomm; lbl_2: state->needfij = ae_false; if( !nlcsqp_sqpretrievefij(state, &meritstate->stepkfic, &meritstate->stepkjc, 
_state) ) { goto lbl_13; } smonitor->probingf.ptr.p_double[0] = nlcsqp_rawlagrangian(state, &meritstate->stepkxc, &meritstate->stepkfic, &meritstate->lagmult, &meritstate->tmpmerit, _state); smonitor->probingf.ptr.p_double[1] = meritstate->stepkfic.ptr.p_double[0]; goto lbl_12; lbl_13: ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | probing search direction suggested by QP subproblem |\n"); ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | Step | Lagrangian (unaugmentd)| Target function |\n"); ae_trace("*** |along D| must be smooth | must be smooth |\n"); ae_trace("*** | | function | slope | function | slope |\n"); smoothnessmonitortraceprobingresults(smonitor, _state); lbl_10: mx = (double)(0); for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(meritstate->d.ptr.p_double[i], _state)/state->trustrad, _state); } if( ae_fp_greater(localstp,(double)(0)) ) { ae_trace("> nonzero linear step was performed\n"); } else { ae_trace("> zero linear step was performed\n"); } ae_trace("max(|Di|)/TrustRad = %0.6f\n", (double)(mx)); ae_trace("stp = %0.6f\n", (double)(localstp)); if( dotracexd ) { ae_trace("X0 (scaled) = "); tracevectorautoprec(&meritstate->stepkx, 0, n, _state); ae_trace("\n"); ae_trace("D (scaled) = "); tracevectorautoprec(&meritstate->d, 0, n, _state); ae_trace("\n"); ae_trace("X1 (scaled) = "); tracevectorautoprec(&meritstate->stepkxn, 0, n, _state); ae_trace("\n"); } ae_trace("meritF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(f0), (double)(f1), (double)(f1-f0)); ae_trace("scaled-targetF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(meritstate->stepkfi.ptr.p_double[0]), (double)(meritstate->stepkfin.ptr.p_double[0]), (double)(meritstate->stepkfin.ptr.p_double[0]-meritstate->stepkfi.ptr.p_double[0])); ae_trace("> evaluating possible Hessian update\n"); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+(meritstate->stepkxn.ptr.p_double[i]-meritstate->stepkx.ptr.p_double[i])*(meritstate->stepknlaggrad.ptr.p_double[i]-meritstate->stepklaggrad.ptr.p_double[i]); } ae_trace("(Sk,Yk) = %0.3e\n", (double)(v)); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(meritstate->stepkxn.ptr.p_double[i]-meritstate->stepkx.ptr.p_double[i], _state); } ae_trace("(Sk,Sk) = %0.3e\n", (double)(v)); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(meritstate->stepknlaggrad.ptr.p_double[i]-meritstate->stepklaggrad.ptr.p_double[i], _state); } ae_trace("(Yk,Yk) = %0.3e\n", (double)(v)); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(meritstate->stepkxn.ptr.p_double[i]-meritstate->stepkx.ptr.p_double[i], _state)*state->subsolver.h.ptr.pp_double[i][i]; for(j=i+1; j<=n-1; j++) { v = v+2*(meritstate->stepkxn.ptr.p_double[i]-meritstate->stepkx.ptr.p_double[i])*state->subsolver.h.ptr.pp_double[i][j]*(meritstate->stepkxn.ptr.p_double[j]-meritstate->stepkx.ptr.p_double[j]); } } ae_trace("Sk*Bk*Sk = %0.3e\n", (double)(v)); v = state->subsolver.h.ptr.pp_double[0][0]; for(i=0; i<=n-1; i++) { v = ae_minreal(v, state->subsolver.h.ptr.pp_double[i][i], _state); } ae_trace("mindiag(Bk) = %0.3e\n", (double)(v)); v = state->subsolver.h.ptr.pp_double[0][0]; for(i=0; i<=n-1; i++) { v = ae_maxreal(v, state->subsolver.h.ptr.pp_double[i][i], _state); } ae_trace("maxdiag(Bk) = %0.3e\n", (double)(v)); lbl_8: /* * Perform Hessian update */ hessianupdateperformed = ae_false; if( ae_fp_greater(localstp,(double)(0)) ) { hessianupdateperformed = nlcsqp_qpsubproblemupdatehessian(state, &state->subsolver, &meritstate->stepkx, 
&meritstate->stepklaggrad, &meritstate->stepkxn, &meritstate->stepknlaggrad, _state); } if( dotrace ) { if( hessianupdateperformed ) { ae_trace("> Hessian updated\n"); v = state->subsolver.h.ptr.pp_double[0][0]; for(i=0; i<=n-1; i++) { v = ae_minreal(v, state->subsolver.h.ptr.pp_double[i][i], _state); } ae_trace("mindiag(Bk) = %0.3e\n", (double)(v)); v = state->subsolver.h.ptr.pp_double[0][0]; for(i=0; i<=n-1; i++) { v = ae_maxreal(v, state->subsolver.h.ptr.pp_double[i][i], _state); } ae_trace("maxdiag(Bk) = %0.3e\n", (double)(v)); } else { ae_trace("> skipping Hessian update\n"); } } /* * Move to new point */ stp = localstp; nlcsqp_sqpcopystate(state, &meritstate->stepkxn, &meritstate->stepkfin, &meritstate->stepkjn, &meritstate->stepkx, &meritstate->stepkfi, &meritstate->stepkj, _state); if( ae_fp_less_eq(localstp,(double)(0)) ) { goto lbl_14; } /* * Report one more inner iteration */ nlcsqp_sqpsendx(state, &meritstate->stepkx, _state); state->f = meritstate->stepkfi.ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; meritstate->rmeritphasestate.stage = 3; goto lbl_rcomm; lbl_3: state->xupdated = ae_false; /* * Update constraint violations */ checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, &meritstate->stepkx, n, &state->replcerr, &state->replcidx, _state); unscaleandchecknlcviolation(&meritstate->stepkfi, &state->fscales, nlec, nlic, &state->repnlcerr, &state->repnlcidx, _state); lbl_14: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; meritstate->rmeritphasestate.ia.ptr.p_int[0] = n; meritstate->rmeritphasestate.ia.ptr.p_int[1] = nslack; meritstate->rmeritphasestate.ia.ptr.p_int[2] = nec; meritstate->rmeritphasestate.ia.ptr.p_int[3] = nic; meritstate->rmeritphasestate.ia.ptr.p_int[4] = nlec; meritstate->rmeritphasestate.ia.ptr.p_int[5] = nlic; meritstate->rmeritphasestate.ia.ptr.p_int[6] = i; meritstate->rmeritphasestate.ia.ptr.p_int[7] = j; meritstate->rmeritphasestate.ba.ptr.p_bool[0] = hessianupdateperformed; meritstate->rmeritphasestate.ba.ptr.p_bool[1] = dotrace; meritstate->rmeritphasestate.ba.ptr.p_bool[2] = doprobing; meritstate->rmeritphasestate.ba.ptr.p_bool[3] = dotracexd; meritstate->rmeritphasestate.ra.ptr.p_double[0] = v; meritstate->rmeritphasestate.ra.ptr.p_double[1] = mx; meritstate->rmeritphasestate.ra.ptr.p_double[2] = f0; meritstate->rmeritphasestate.ra.ptr.p_double[3] = f1; meritstate->rmeritphasestate.ra.ptr.p_double[4] = nu; meritstate->rmeritphasestate.ra.ptr.p_double[5] = localstp; meritstate->rmeritphasestate.ra.ptr.p_double[6] = stepklagval; meritstate->rmeritphasestate.ra.ptr.p_double[7] = stepknlagval; meritstate->rmeritphasestate.ra.ptr.p_double[8] = stp; return result; } /************************************************************************* This function initializes MeritPhase temporaries. It should be called before beginning of each new iteration. You may call it multiple times for the same instance of MeritPhase temporaries. INPUT PARAMETERS: MeritState - instance to be initialized. 
N - problem dimensionality NEC, NIC - linear equality/inequality constraint count NLEC, NLIC - nonlinear equality/inequality constraint count OUTPUT PARAMETERS: MeritState - instance being initialized -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static void nlcsqp_meritphaseresults(minsqpmeritphasestate* meritstate, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, ae_int_t* status, ae_state *_state) { ae_int_t i; ae_int_t j; *status = meritstate->status; for(i=0; i<=meritstate->n-1; i++) { curx->ptr.p_double[i] = meritstate->stepkx.ptr.p_double[i]; } for(i=0; i<=meritstate->nlec+meritstate->nlic; i++) { curfi->ptr.p_double[i] = meritstate->stepkfi.ptr.p_double[i]; for(j=0; j<=meritstate->n-1; j++) { curj->ptr.pp_double[i][j] = meritstate->stepkj.ptr.pp_double[i][j]; } } ae_assert(lagmult->cnt>=meritstate->nec+meritstate->nic+meritstate->nlec+meritstate->nlic, "MeritPhaseResults: LagMult too short", _state); for(i=0; i<=meritstate->nec+meritstate->nic+meritstate->nlec+meritstate->nlic-1; i++) { lagmult->ptr.p_double[i] = meritstate->lagmult.ptr.p_double[i]; } } /************************************************************************* Copies X to State.X *************************************************************************/ static void nlcsqp_sqpsendx(minsqpstate* state, /* Real */ ae_vector* xs, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&xs->ptr.p_double[i]<=state->scaledbndl.ptr.p_double[i] ) { state->x.ptr.p_double[i] = state->scaledbndl.ptr.p_double[i]; continue; } if( state->hasbndu.ptr.p_bool[i]&&xs->ptr.p_double[i]>=state->scaledbndu.ptr.p_double[i] ) { state->x.ptr.p_double[i] = state->scaledbndu.ptr.p_double[i]; continue; } state->x.ptr.p_double[i] = xs->ptr.p_double[i]; } } /************************************************************************* Retrieves F-vector and scaled Jacobian, copies them to FiS and JS. Returns True on success, False on failure (when F or J are not finite numbers). *************************************************************************/ static ae_bool nlcsqp_sqpretrievefij(minsqpstate* state, /* Real */ ae_vector* fis, /* Real */ ae_matrix* js, ae_state *_state) { ae_int_t nlec; ae_int_t nlic; ae_int_t n; ae_int_t i; ae_int_t j; double v; double vv; ae_bool result; n = state->n; nlec = state->nlec; nlic = state->nlic; v = (double)(0); for(i=0; i<=nlec+nlic; i++) { vv = 1/state->fscales.ptr.p_double[i]; fis->ptr.p_double[i] = vv*state->fi.ptr.p_double[i]; v = 0.1*v+fis->ptr.p_double[i]; for(j=0; j<=n-1; j++) { js->ptr.pp_double[i][j] = vv*state->j.ptr.pp_double[i][j]; v = 0.1*v+js->ptr.pp_double[i][j]; } } result = ae_isfinite(v, _state); return result; } /************************************************************************* Copies state (X point, Fi vector, J jacobian) to preallocated storage. 
*************************************************************************/ static void nlcsqp_sqpcopystate(minsqpstate* state, /* Real */ ae_vector* x0, /* Real */ ae_vector* fi0, /* Real */ ae_matrix* j0, /* Real */ ae_vector* x1, /* Real */ ae_vector* fi1, /* Real */ ae_matrix* j1, ae_state *_state) { ae_int_t nlec; ae_int_t nlic; ae_int_t n; ae_int_t i; ae_int_t j; n = state->n; nlec = state->nlec; nlic = state->nlic; for(i=0; i<=n-1; i++) { x1->ptr.p_double[i] = x0->ptr.p_double[i]; } for(i=0; i<=nlec+nlic; i++) { fi1->ptr.p_double[i] = fi0->ptr.p_double[i]; for(j=0; j<=n-1; j++) { j1->ptr.pp_double[i][j] = j0->ptr.pp_double[i][j]; } } } /************************************************************************* This function calculates the Lagrangian of the problem (in scaled variables): its value and gradient. Additionally, it estimates violation of the linear constraints at the point, as well as the index of the most violated constraint. *************************************************************************/ static void nlcsqp_lagrangianfg(minsqpstate* state, /* Real */ ae_vector* x, double trustrad, /* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* lagmult, minsqptmplagrangian* tmp, double* f, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; double v; double vlag; double vact; double vd; ae_bool usesparsegemv; *f = 0; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; /* * Target function */ *f = fi->ptr.p_double[0]; for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = j->ptr.pp_double[0][i]; } /* * Lagrangian terms for linear constraints, constraint violations */ if( nec+nic>0 ) { usesparsegemv = state->subsolver.sparserawlc.ridx.ptr.p_int[nec+nic]<n*(nec+nic); rvectorsetlengthatleast(&tmp->sclagtmp0, ae_maxint(nec+nic, n, _state), _state); rvectorsetlengthatleast(&tmp->sclagtmp1, ae_maxint(nec+nic, n, _state), _state); if( usesparsegemv ) { sparsemv(&state->subsolver.sparserawlc, x, &tmp->sclagtmp0, _state); } else { rmatrixgemv(nec+nic, n, 1.0, &state->scaledcleic, 0, 0, 0, x, 0, 0.0, &tmp->sclagtmp0, 0, _state); } for(i=0; i<=nec+nic-1; i++) { /* * Prepare */ v = tmp->sclagtmp0.ptr.p_double[i]-state->scaledcleic.ptr.pp_double[i][n]; vlag = lagmult->ptr.p_double[i]; tmp->sclagtmp1.ptr.p_double[i] = (double)(0); /* * Primary Lagrangian term */ vact = v; vd = (double)(1); *f = *f+vlag*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+vlag*vd; /* * Quadratic augmentation term */ if( i<nec||v>0 ) { vact = v; } else { vact = (double)(0); } *f = *f+0.5*nlcsqp_augmentationfactor*vact*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+nlcsqp_augmentationfactor*vact; } if( usesparsegemv ) { sparsemtv(&state->subsolver.sparserawlc, &tmp->sclagtmp1, &tmp->sclagtmp0, _state); for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = g->ptr.p_double[i]+tmp->sclagtmp0.ptr.p_double[i]; } } else { rmatrixgemv(n, nec+nic, 1.0, &state->scaledcleic, 0, 0, 1, &tmp->sclagtmp1, 0, 1.0, g, 0, _state); } } /* * Lagrangian terms for nonlinear constraints */ rvectorsetlengthatleast(&tmp->sclagtmp1, nlec+nlic, _state); for(i=0; i<=nlec+nlic-1; i++) { v = fi->ptr.p_double[1+i]; vlag = lagmult->ptr.p_double[nec+nic+i]; tmp->sclagtmp1.ptr.p_double[i] = (double)(0); /* * Lagrangian term */ vact = v; vd = (double)(1); *f = *f+vlag*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+vlag*vd; /* * Augmentation term */ if( i<nlec||v>0 ) { vact = v; } else { vact = (double)(0); } *f =
*f+0.5*nlcsqp_augmentationfactor*vact*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+nlcsqp_augmentationfactor*vact; } rmatrixgemv(n, nlec+nlic, 1.0, j, 1, 0, 1, &tmp->sclagtmp1, 0, 1.0, g, 0, _state); } /************************************************************************* This function calculates the L1-penalized merit function. *************************************************************************/ static double nlcsqp_meritfunction(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, ae_state *_state) { double tmp0; double tmp1; double result; nlcsqp_meritfunctionandrawlagrangian(state, x, fi, lagmult, tmp, &tmp0, &tmp1, _state); result = tmp0; return result; } /************************************************************************* This function calculates the raw (unaugmented and smooth) Lagrangian. *************************************************************************/ static double nlcsqp_rawlagrangian(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, ae_state *_state) { double tmp0; double tmp1; double result; nlcsqp_meritfunctionandrawlagrangian(state, x, fi, lagmult, tmp, &tmp0, &tmp1, _state); result = tmp1; return result; } /************************************************************************* This function calculates the L1-penalized merit function and the raw (smooth and un-augmented) Lagrangian. Equality constraints are penalized through |v| and v^2 regardless of the sign of the violation v, while inequality constraints are penalized only when violated (v>0); the raw Lagrangian accumulates plain LagMult[i]*v terms. *************************************************************************/ static void nlcsqp_meritfunctionandrawlagrangian(minsqpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minsqptmpmerit* tmp, double* meritf, double* rawlag, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; double v; *meritf = 0; *rawlag = 0; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; /* * Merit function and Lagrangian: primary term */ *meritf = fi->ptr.p_double[0]; *rawlag = fi->ptr.p_double[0]; /* * Merit function: augmentation and penalty for linear constraints */ rvectorsetlengthatleast(&tmp->mftmp0, nec+nic, _state); rmatrixgemv(nec+nic, n, 1.0, &state->scaledcleic, 0, 0, 0, x, 0, 0.0, &tmp->mftmp0, 0, _state); for(i=0; i<=nec+nic-1; i++) { v = tmp->mftmp0.ptr.p_double[i]-state->scaledcleic.ptr.pp_double[i][n]; if( i<nec ) { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcsqp_augmentationfactor*v*v; *meritf = *meritf+nlcsqp_meritfunctionbase*ae_fabs(v, _state)+nlcsqp_meritfunctiongain*ae_fabs(1+ae_fabs(lagmult->ptr.p_double[i], _state), _state)*ae_fabs(v, _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[i]*v; } else { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcsqp_augmentationfactor*ae_sqr(ae_maxreal(v, (double)(0), _state), _state); *meritf = *meritf+nlcsqp_meritfunctionbase*ae_maxreal(v, (double)(0), _state)+nlcsqp_meritfunctiongain*ae_fabs(1+ae_fabs(lagmult->ptr.p_double[i], _state), _state)*ae_maxreal(v, (double)(0), _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[i]*v; } } /* * Merit function: augmentation and penalty for nonlinear constraints */ for(i=0; i<=nlec+nlic-1; i++) { v = fi->ptr.p_double[1+i]; if( i<nlec ) { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcsqp_augmentationfactor*v*v; *meritf = *meritf+nlcsqp_meritfunctionbase*ae_fabs(v, _state)+nlcsqp_meritfunctiongain*ae_fabs(1+ae_fabs(lagmult->ptr.p_double[nec+nic+i], _state), _state)*ae_fabs(v, _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[nec+nic+i]*v; } else { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcsqp_augmentationfactor*ae_sqr(ae_maxreal(v, (double)(0), _state), _state); *meritf = *meritf+nlcsqp_meritfunctionbase*ae_maxreal(v, (double)(0),
_state)+nlcsqp_meritfunctiongain*ae_fabs(1+ae_fabs(lagmult->ptr.p_double[nec+nic+i], _state), _state)*ae_maxreal(v, (double)(0), _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[nec+nic+i]*v; } } } void _minsqpsubsolver_init(void* _p, ae_state *_state, ae_bool make_automatic) { minsqpsubsolver *p = (minsqpsubsolver*)_p; ae_touch_ptr((void*)p); _vipmstate_init(&p->ipmsolver, _state, make_automatic); ae_vector_init(&p->curb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cural, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curau, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparserawlc, _state, make_automatic); _sparsematrix_init(&p->sparseefflc, _state, make_automatic); ae_vector_init(&p->d0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->h, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->densedummy, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsedummy, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasal, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasau, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->activea, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->activerhs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->activeidx, 0, DT_INT, _state, make_automatic); } void _minsqpsubsolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minsqpsubsolver *dst = (minsqpsubsolver*)_dst; minsqpsubsolver *src = (minsqpsubsolver*)_src; dst->algokind = src->algokind; _vipmstate_init_copy(&dst->ipmsolver, &src->ipmsolver, _state, make_automatic); ae_vector_init_copy(&dst->curb, &src->curb, _state, make_automatic); ae_vector_init_copy(&dst->curbndl, &src->curbndl, _state, make_automatic); ae_vector_init_copy(&dst->curbndu, &src->curbndu, _state, make_automatic); ae_vector_init_copy(&dst->cural, &src->cural, _state, make_automatic); ae_vector_init_copy(&dst->curau, &src->curau, _state, make_automatic); _sparsematrix_init_copy(&dst->sparserawlc, &src->sparserawlc, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseefflc, &src->sparseefflc, _state, make_automatic); ae_vector_init_copy(&dst->d0, &src->d0, _state, make_automatic); ae_matrix_init_copy(&dst->h, &src->h, _state, make_automatic); ae_matrix_init_copy(&dst->densedummy, &src->densedummy, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsedummy, &src->sparsedummy, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_vector_init_copy(&dst->sk, &src->sk, _state, make_automatic); ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->hasal, &src->hasal, 
_state, make_automatic); ae_vector_init_copy(&dst->hasau, &src->hasau, _state, make_automatic); ae_matrix_init_copy(&dst->activea, &src->activea, _state, make_automatic); ae_vector_init_copy(&dst->activerhs, &src->activerhs, _state, make_automatic); ae_vector_init_copy(&dst->activeidx, &src->activeidx, _state, make_automatic); dst->activesetsize = src->activesetsize; } void _minsqpsubsolver_clear(void* _p) { minsqpsubsolver *p = (minsqpsubsolver*)_p; ae_touch_ptr((void*)p); _vipmstate_clear(&p->ipmsolver); ae_vector_clear(&p->curb); ae_vector_clear(&p->curbndl); ae_vector_clear(&p->curbndu); ae_vector_clear(&p->cural); ae_vector_clear(&p->curau); _sparsematrix_clear(&p->sparserawlc); _sparsematrix_clear(&p->sparseefflc); ae_vector_clear(&p->d0); ae_matrix_clear(&p->h); ae_matrix_clear(&p->densedummy); _sparsematrix_clear(&p->sparsedummy); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmp2); ae_vector_clear(&p->sk); ae_vector_clear(&p->yk); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->hasal); ae_vector_clear(&p->hasau); ae_matrix_clear(&p->activea); ae_vector_clear(&p->activerhs); ae_vector_clear(&p->activeidx); } void _minsqpsubsolver_destroy(void* _p) { minsqpsubsolver *p = (minsqpsubsolver*)_p; ae_touch_ptr((void*)p); _vipmstate_destroy(&p->ipmsolver); ae_vector_destroy(&p->curb); ae_vector_destroy(&p->curbndl); ae_vector_destroy(&p->curbndu); ae_vector_destroy(&p->cural); ae_vector_destroy(&p->curau); _sparsematrix_destroy(&p->sparserawlc); _sparsematrix_destroy(&p->sparseefflc); ae_vector_destroy(&p->d0); ae_matrix_destroy(&p->h); ae_matrix_destroy(&p->densedummy); _sparsematrix_destroy(&p->sparsedummy); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmp2); ae_vector_destroy(&p->sk); ae_vector_destroy(&p->yk); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->hasal); ae_vector_destroy(&p->hasau); ae_matrix_destroy(&p->activea); ae_vector_destroy(&p->activerhs); ae_vector_destroy(&p->activeidx); } void _minsqptmplagrangian_init(void* _p, ae_state *_state, ae_bool make_automatic) { minsqptmplagrangian *p = (minsqptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->sclagtmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclagtmp1, 0, DT_REAL, _state, make_automatic); } void _minsqptmplagrangian_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minsqptmplagrangian *dst = (minsqptmplagrangian*)_dst; minsqptmplagrangian *src = (minsqptmplagrangian*)_src; ae_vector_init_copy(&dst->sclagtmp0, &src->sclagtmp0, _state, make_automatic); ae_vector_init_copy(&dst->sclagtmp1, &src->sclagtmp1, _state, make_automatic); } void _minsqptmplagrangian_clear(void* _p) { minsqptmplagrangian *p = (minsqptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->sclagtmp0); ae_vector_clear(&p->sclagtmp1); } void _minsqptmplagrangian_destroy(void* _p) { minsqptmplagrangian *p = (minsqptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->sclagtmp0); ae_vector_destroy(&p->sclagtmp1); } void _minsqptmpmerit_init(void* _p, ae_state *_state, ae_bool make_automatic) { minsqptmpmerit *p = (minsqptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->mftmp0, 0, DT_REAL, _state, make_automatic); } void _minsqptmpmerit_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minsqptmpmerit *dst = (minsqptmpmerit*)_dst; minsqptmpmerit *src = (minsqptmpmerit*)_src; ae_vector_init_copy(&dst->mftmp0, 
&src->mftmp0, _state, make_automatic); } void _minsqptmpmerit_clear(void* _p) { minsqptmpmerit *p = (minsqptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->mftmp0); } void _minsqptmpmerit_destroy(void* _p) { minsqptmpmerit *p = (minsqptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->mftmp0); } void _minsqpmeritphasestate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minsqpmeritphasestate *p = (minsqpmeritphasestate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkxc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkxn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfi, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfic, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfin, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkj, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjc, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjn, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->lagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dummylagmult, 0, DT_REAL, _state, make_automatic); _minsqptmpmerit_init(&p->tmpmerit, _state, make_automatic); _minsqptmplagrangian_init(&p->tmplagrangianfg, _state, make_automatic); ae_vector_init(&p->stepklaggrad, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepknlaggrad, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rmeritphasestate, _state, make_automatic); } void _minsqpmeritphasestate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minsqpmeritphasestate *dst = (minsqpmeritphasestate*)_dst; minsqpmeritphasestate *src = (minsqpmeritphasestate*)_src; dst->n = src->n; dst->nec = src->nec; dst->nic = src->nic; dst->nlec = src->nlec; dst->nlic = src->nlic; ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->dx, &src->dx, _state, make_automatic); ae_vector_init_copy(&dst->stepkx, &src->stepkx, _state, make_automatic); ae_vector_init_copy(&dst->stepkxc, &src->stepkxc, _state, make_automatic); ae_vector_init_copy(&dst->stepkxn, &src->stepkxn, _state, make_automatic); ae_vector_init_copy(&dst->stepkfi, &src->stepkfi, _state, make_automatic); ae_vector_init_copy(&dst->stepkfic, &src->stepkfic, _state, make_automatic); ae_vector_init_copy(&dst->stepkfin, &src->stepkfin, _state, make_automatic); ae_matrix_init_copy(&dst->stepkj, &src->stepkj, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjc, &src->stepkjc, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjn, &src->stepkjn, _state, make_automatic); ae_vector_init_copy(&dst->lagmult, &src->lagmult, _state, make_automatic); ae_vector_init_copy(&dst->dummylagmult, &src->dummylagmult, _state, make_automatic); _minsqptmpmerit_init_copy(&dst->tmpmerit, &src->tmpmerit, _state, make_automatic); _minsqptmplagrangian_init_copy(&dst->tmplagrangianfg, &src->tmplagrangianfg, _state, make_automatic); ae_vector_init_copy(&dst->stepklaggrad, &src->stepklaggrad, _state, make_automatic); ae_vector_init_copy(&dst->stepknlaggrad, &src->stepknlaggrad, _state, make_automatic); dst->status = src->status; _rcommstate_init_copy(&dst->rmeritphasestate, &src->rmeritphasestate, _state, make_automatic); } void _minsqpmeritphasestate_clear(void* _p) { minsqpmeritphasestate *p = (minsqpmeritphasestate*)_p; 
ae_touch_ptr((void*)p); ae_vector_clear(&p->d); ae_vector_clear(&p->dx); ae_vector_clear(&p->stepkx); ae_vector_clear(&p->stepkxc); ae_vector_clear(&p->stepkxn); ae_vector_clear(&p->stepkfi); ae_vector_clear(&p->stepkfic); ae_vector_clear(&p->stepkfin); ae_matrix_clear(&p->stepkj); ae_matrix_clear(&p->stepkjc); ae_matrix_clear(&p->stepkjn); ae_vector_clear(&p->lagmult); ae_vector_clear(&p->dummylagmult); _minsqptmpmerit_clear(&p->tmpmerit); _minsqptmplagrangian_clear(&p->tmplagrangianfg); ae_vector_clear(&p->stepklaggrad); ae_vector_clear(&p->stepknlaggrad); _rcommstate_clear(&p->rmeritphasestate); } void _minsqpmeritphasestate_destroy(void* _p) { minsqpmeritphasestate *p = (minsqpmeritphasestate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->d); ae_vector_destroy(&p->dx); ae_vector_destroy(&p->stepkx); ae_vector_destroy(&p->stepkxc); ae_vector_destroy(&p->stepkxn); ae_vector_destroy(&p->stepkfi); ae_vector_destroy(&p->stepkfic); ae_vector_destroy(&p->stepkfin); ae_matrix_destroy(&p->stepkj); ae_matrix_destroy(&p->stepkjc); ae_matrix_destroy(&p->stepkjn); ae_vector_destroy(&p->lagmult); ae_vector_destroy(&p->dummylagmult); _minsqptmpmerit_destroy(&p->tmpmerit); _minsqptmplagrangian_destroy(&p->tmplagrangianfg); ae_vector_destroy(&p->stepklaggrad); ae_vector_destroy(&p->stepknlaggrad); _rcommstate_destroy(&p->rmeritphasestate); } void _minsqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minsqpstate *p = (minsqpstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->scaledcleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->lcsrcidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->scaledbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->scaledbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic); _minsqpmeritphasestate_init(&p->meritstate, _state, make_automatic); ae_vector_init(&p->step0x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->backupx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->step0fi, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfi, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->backupfi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->step0j, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkj, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->meritlagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dummylagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fscales, 0, DT_REAL, _state, make_automatic); _minsqpsubsolver_init(&p->subsolver, _state, make_automatic); _minsqptmpmerit_init(&p->tmpmerit, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); } void _minsqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minsqpstate *dst = (minsqpstate*)_dst; minsqpstate *src = (minsqpstate*)_src; dst->n = src->n; dst->nec = src->nec; dst->nic = src->nic; dst->nlec = src->nlec; dst->nlic = src->nlic; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_matrix_init_copy(&dst->scaledcleic, &src->scaledcleic, _state, make_automatic); 
ae_vector_init_copy(&dst->lcsrcidx, &src->lcsrcidx, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndl, &src->scaledbndl, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndu, &src->scaledbndu, _state, make_automatic); dst->epsx = src->epsx; dst->maxits = src->maxits; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic); ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic); dst->f = src->f; dst->needfij = src->needfij; dst->xupdated = src->xupdated; _minsqpmeritphasestate_init_copy(&dst->meritstate, &src->meritstate, _state, make_automatic); dst->trustrad = src->trustrad; dst->fstagnationcnt = src->fstagnationcnt; ae_vector_init_copy(&dst->step0x, &src->step0x, _state, make_automatic); ae_vector_init_copy(&dst->stepkx, &src->stepkx, _state, make_automatic); ae_vector_init_copy(&dst->backupx, &src->backupx, _state, make_automatic); ae_vector_init_copy(&dst->step0fi, &src->step0fi, _state, make_automatic); ae_vector_init_copy(&dst->stepkfi, &src->stepkfi, _state, make_automatic); ae_vector_init_copy(&dst->backupfi, &src->backupfi, _state, make_automatic); ae_matrix_init_copy(&dst->step0j, &src->step0j, _state, make_automatic); ae_matrix_init_copy(&dst->stepkj, &src->stepkj, _state, make_automatic); dst->haslagmult = src->haslagmult; ae_vector_init_copy(&dst->meritlagmult, &src->meritlagmult, _state, make_automatic); ae_vector_init_copy(&dst->dummylagmult, &src->dummylagmult, _state, make_automatic); ae_vector_init_copy(&dst->fscales, &src->fscales, _state, make_automatic); _minsqpsubsolver_init_copy(&dst->subsolver, &src->subsolver, _state, make_automatic); _minsqptmpmerit_init_copy(&dst->tmpmerit, &src->tmpmerit, _state, make_automatic); dst->repsimplexiterations = src->repsimplexiterations; dst->repsimplexiterations1 = src->repsimplexiterations1; dst->repsimplexiterations2 = src->repsimplexiterations2; dst->repsimplexiterations3 = src->repsimplexiterations3; dst->repiterationscount = src->repiterationscount; dst->repterminationtype = src->repterminationtype; dst->repbcerr = src->repbcerr; dst->repbcidx = src->repbcidx; dst->replcerr = src->replcerr; dst->replcidx = src->replcidx; dst->repnlcerr = src->repnlcerr; dst->repnlcidx = src->repnlcidx; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); } void _minsqpstate_clear(void* _p) { minsqpstate *p = (minsqpstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_matrix_clear(&p->scaledcleic); ae_vector_clear(&p->lcsrcidx); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->scaledbndl); ae_vector_clear(&p->scaledbndu); ae_vector_clear(&p->x); ae_vector_clear(&p->fi); ae_matrix_clear(&p->j); _minsqpmeritphasestate_clear(&p->meritstate); ae_vector_clear(&p->step0x); ae_vector_clear(&p->stepkx); ae_vector_clear(&p->backupx); ae_vector_clear(&p->step0fi); ae_vector_clear(&p->stepkfi); ae_vector_clear(&p->backupfi); ae_matrix_clear(&p->step0j); ae_matrix_clear(&p->stepkj); ae_vector_clear(&p->meritlagmult); ae_vector_clear(&p->dummylagmult); ae_vector_clear(&p->fscales); _minsqpsubsolver_clear(&p->subsolver); _minsqptmpmerit_clear(&p->tmpmerit); _rcommstate_clear(&p->rstate); } void _minsqpstate_destroy(void* _p) { minsqpstate *p = (minsqpstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_matrix_destroy(&p->scaledcleic); 
ae_vector_destroy(&p->lcsrcidx); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->scaledbndl); ae_vector_destroy(&p->scaledbndu); ae_vector_destroy(&p->x); ae_vector_destroy(&p->fi); ae_matrix_destroy(&p->j); _minsqpmeritphasestate_destroy(&p->meritstate); ae_vector_destroy(&p->step0x); ae_vector_destroy(&p->stepkx); ae_vector_destroy(&p->backupx); ae_vector_destroy(&p->step0fi); ae_vector_destroy(&p->stepkfi); ae_vector_destroy(&p->backupfi); ae_matrix_destroy(&p->step0j); ae_matrix_destroy(&p->stepkj); ae_vector_destroy(&p->meritlagmult); ae_vector_destroy(&p->dummylagmult); ae_vector_destroy(&p->fscales); _minsqpsubsolver_destroy(&p->subsolver); _minsqptmpmerit_destroy(&p->tmpmerit); _rcommstate_destroy(&p->rstate); } #endif #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using a quasi-Newton method (LBFGS scheme) which is optimized to use a minimum amount of memory. The subroutine generates the approximation of an inverse Hessian matrix by using information about the last M steps of the algorithm (instead of N). It reduces the required amount of memory from order N^2 to order 2*N*M. REQUIREMENTS: The algorithm will request the following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinLBFGSCreate() call 2. User tunes solver parameters with MinLBFGSSetCond(), MinLBFGSSetStpMax() and other functions 3. User calls MinLBFGSOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinLBFGSResults() to get solution 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLBFGSRestartFrom() allows reusing an already initialized structure. INPUT PARAMETERS: N - problem dimension. N>0 M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7. The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - initial solution approximation, array[0..N-1]. OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLBFGSSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLBFGSSetStpMax() function to bound algorithm's steps. However, L-BFGS rarely needs such a tuning. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgscreate(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, minlbfgsstate* state, ae_state *_state) { _minlbfgsstate_clear(state); ae_assert(n>=1, "MinLBFGSCreate: N<1!", _state); ae_assert(m>=1, "MinLBFGSCreate: M<1", _state); ae_assert(m<=n, "MinLBFGSCreate: M>N", _state); ae_assert(x->cnt>=n, "MinLBFGSCreate: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLBFGSCreate: X contains infinite or NaN values!", _state); minlbfgscreatex(n, m, x, 0, 0.0, state, _state); } /************************************************************************* The subroutine is a finite difference variant of MinLBFGSCreate(). It uses finite differences in order to differentiate the target function. The description below contains only the part which is specific to this function. We recommend reading the comments on MinLBFGSCreate() in order to get more information about creation of the LBFGS optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of corrections in the BFGS scheme of Hessian approximation update. Recommended value: 3<=M<=7.
The smaller value causes worse convergence, the bigger will not cause a considerably better convergence, but will cause a fall in the performance. M<=N. X - starting point, array[0..N-1]. DiffStep - differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinLBFGSSetScale() call. 3. we recommend moderate values of the differentiation step. Too large a step will result in too large truncation errors, while too small a step will result in too large numerical errors. 1.0E-6 can be a good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. LBFGS needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend using this function for fast prototyping on small-dimensional problems only, and to implement analytical gradient as soon as possible. -- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minlbfgscreatef(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, double diffstep, minlbfgsstate* state, ae_state *_state) { _minlbfgsstate_clear(state); ae_assert(n>=1, "MinLBFGSCreateF: N too small!", _state); ae_assert(m>=1, "MinLBFGSCreateF: M<1", _state); ae_assert(m<=n, "MinLBFGSCreateF: M>N", _state); ae_assert(x->cnt>=n, "MinLBFGSCreateF: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLBFGSCreateF: X contains infinite or NaN values!", _state); ae_assert(ae_isfinite(diffstep, _state), "MinLBFGSCreateF: DiffStep is infinite or NaN!", _state); ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinLBFGSCreateF: DiffStep is non-positive!", _state); minlbfgscreatex(n, m, x, 0, diffstep, state, _state); } /************************************************************************* This function sets stopping conditions for L-BFGS optimization algorithm. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidean norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinLBFGSSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinLBFGSSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (small EpsX).
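EXAMPLE: a minimal configuration sketch written against the C++ interface of this unit (the tolerance values are purely illustrative, not recommendations):

    alglib::minlbfgssetcond(state, 0.0, 0.0, 1.0E-6, 0);  // stop once the scaled step length drops below 1.0E-6
    alglib::minlbfgssetcond(state, 0.0, 0.0, 0.0, 0);     // all zeros => automatic criterion (small EpsX) is selected

Here "state" is assumed to be a minlbfgsstate object which was already prepared with MinLBFGSCreate() or MinLBFGSCreateF().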
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetcond(minlbfgsstate* state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinLBFGSSetCond: EpsG is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinLBFGSSetCond: negative EpsG!", _state); ae_assert(ae_isfinite(epsf, _state), "MinLBFGSSetCond: EpsF is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinLBFGSSetCond: negative EpsF!", _state); ae_assert(ae_isfinite(epsx, _state), "MinLBFGSSetCond: EpsX is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinLBFGSSetCond: negative EpsX!", _state); ae_assert(maxits>=0, "MinLBFGSSetCond: negative MaxIts!", _state); if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 ) { epsx = 1.0E-6; } state->epsg = epsg; state->epsf = epsf; state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinLBFGSOptimize(). -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetxrep(minlbfgsstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetstpmax(minlbfgsstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinLBFGSSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinLBFGSSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* This function sets scaling coefficients for LBFGS optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the LBFGS too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinLBFGSSetPrec...() functions. 
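For example (a sketch against the C++ interface; the magnitudes below are purely illustrative), for a problem whose first variable is of order 1 and whose second variable is of order 10^4 one may set

    alglib::real_1d_array s = "[1,10000]";
    alglib::minlbfgssetscale(state, s);   // "state" is an already created minlbfgsstate

Only the magnitudes matter - the sign of each S[i] is ignored (see below).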
There is special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related, forms of tuning the solver. INPUT PARAMETERS: State - structure which stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlbfgssetscale(minlbfgsstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->n, "MinLBFGSSetScale: Length(S)<N", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinLBFGSSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinLBFGSSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* Extended subroutine for internal use only. Accepts additional parameters: Flags - additional settings: * Flags = 0 means no additional settings * Flags = 1 "do not allocate memory". Used when solving many subsequent tasks with same N/M values. First call MUST be without this flag bit set, subsequent calls of MinLBFGS with same MinLBFGSState structure can set Flags to 1. DiffStep - numerical differentiation step -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgscreatex(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, ae_int_t flags, double diffstep, minlbfgsstate* state, ae_state *_state) { ae_bool allocatemem; ae_int_t i; ae_assert(n>=1, "MinLBFGS: N too small!", _state); ae_assert(m>=1, "MinLBFGS: M too small!", _state); ae_assert(m<=n, "MinLBFGS: M too large!", _state); /* * Initialize */ state->teststep = (double)(0); state->smoothnessguardlevel = 0; smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state); state->diffstep = diffstep; state->n = n; state->m = m; allocatemem = flags%2==0; flags = flags/2; if( allocatemem ) { rvectorsetlengthatleast(&state->rho, m, _state); rvectorsetlengthatleast(&state->theta, m, _state); rmatrixsetlengthatleast(&state->yk, m, n, _state); rmatrixsetlengthatleast(&state->sk, m, n, _state); rvectorsetlengthatleast(&state->d, n, _state); rvectorsetlengthatleast(&state->xp, n, _state); rvectorsetlengthatleast(&state->x, n, _state); rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->s, n, _state); rvectorsetlengthatleast(&state->invs, n, _state); rvectorsetlengthatleast(&state->lastscaleused, n, _state); rvectorsetlengthatleast(&state->g, n, _state); rvectorsetlengthatleast(&state->work, n, _state); } for(i=0; i<=n-1; i++) { state->s.ptr.p_double[i] = 1.0; state->invs.ptr.p_double[i] = 1.0; state->lastscaleused.ptr.p_double[i] = 1.0; } state->prectype = 0; minlbfgssetcond(state, (double)(0), (double)(0), (double)(0), 0, _state); minlbfgssetxrep(state, ae_false, _state); minlbfgssetstpmax(state, (double)(0), _state); minlbfgsrestartfrom(state, x, _state); } /************************************************************************* Modification of the preconditioner: default preconditioner (simple scaling, same for all elements of X) is used.
INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state) { state->prectype = 0; } /************************************************************************* Modification of the preconditioner: Cholesky factorization of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state P - triangular preconditioner, Cholesky factorization of the approximate Hessian. array[0..N-1,0..N-1], (if larger, only leading N elements are used). IsUpper - whether upper or lower triangle of P is given (other triangle is not referenced) After call to this function preconditioner is changed to P (P is copied into the internal buffer). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: P should be nonsingular. Exception will be thrown otherwise. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetpreccholesky(minlbfgsstate* state, /* Real */ ae_matrix* p, ae_bool isupper, ae_state *_state) { ae_int_t i; double mx; ae_assert(isfinitertrmatrix(p, state->n, isupper, _state), "MinLBFGSSetPrecCholesky: P contains infinite or NAN values!", _state); mx = (double)(0); for(i=0; i<=state->n-1; i++) { mx = ae_maxreal(mx, ae_fabs(p->ptr.pp_double[i][i], _state), _state); } ae_assert(ae_fp_greater(mx,(double)(0)), "MinLBFGSSetPrecCholesky: P is strictly singular!", _state); if( state->denseh.rows<state->n||state->denseh.cols<state->n ) { ae_matrix_set_length(&state->denseh, state->n, state->n, _state); } state->prectype = 1; if( isupper ) { rmatrixcopy(state->n, state->n, p, 0, 0, &state->denseh, 0, 0, _state); } else { rmatrixtranspose(state->n, state->n, p, 0, 0, &state->denseh, 0, 0, _state); } } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: D[i] should be positive. Exception will be thrown otherwise. NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecdiag(minlbfgsstate* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(d->cnt>=state->n, "MinLBFGSSetPrecDiag: D is too short", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinLBFGSSetPrecDiag: D contains infinite or NAN elements", _state); ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "MinLBFGSSetPrecDiag: D contains non-positive elements", _state); } rvectorsetlengthatleast(&state->diagh, state->n, _state); state->prectype = 2; for(i=0; i<=state->n-1; i++) { state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; } } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning.
This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinLBFGSSetScale() call (before or after MinLBFGSSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecscale(minlbfgsstate* state, ae_state *_state) { state->prectype = 3; } /************************************************************************* This function sets low-rank preconditioner for Hessian matrix H=D+W'*C*W, where: * H is a Hessian matrix, which is approximated by D/W/C * D is a NxN diagonal positive definite matrix * W is a KxN low-rank correction * C is a KxK positive definite diagonal factor of low-rank correction This preconditioner is inexact but fast - it requires O(N*K) time to be applied. Preconditioner P is calculated by artificially constructing a set of BFGS updates which tries to reproduce behavior of H: * Sk = Wk (k-th row of W) * Yk = (D+Wk'*Ck*Wk)*Sk * Yk/Sk are reordered by ascending of C[k]*norm(Wk)^2 Here we assume that rows of Wk are orthogonal or nearly orthogonal, which allows us to have O(N*K+K^2) update instead of O(N*K^2) one. Reordering of updates is essential for having good performance on non-orthogonal problems (updates which do not add much of curvature are added first, and updates which add very large eigenvalues are added last and override effect of the first updates). In practice, this preconditioner is perfect when ortogonal correction is applied; on non-orthogonal problems sometimes it allows to achieve 5x speedup (when compared to non-preconditioned solver). -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetprecrankklbfgsfast(minlbfgsstate* state, /* Real */ ae_vector* d, /* Real */ ae_vector* c, /* Real */ ae_matrix* w, ae_int_t cnt, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t n; n = state->n; state->prectype = 4; state->preck = cnt; rvectorsetlengthatleast(&state->precc, cnt, _state); rvectorsetlengthatleast(&state->precd, n, _state); rmatrixsetlengthatleast(&state->precw, cnt, n, _state); for(i=0; i<=n-1; i++) { state->precd.ptr.p_double[i] = d->ptr.p_double[i]; } for(i=0; i<=cnt-1; i++) { state->precc.ptr.p_double[i] = c->ptr.p_double[i]; for(j=0; j<=n-1; j++) { state->precw.ptr.pp_double[i][j] = w->ptr.pp_double[i][j]; } } } /************************************************************************* This function sets exact low-rank preconditioner for Hessian matrix H=D+W'*C*W, where: * H is a Hessian matrix, which is approximated by D/W/C * D is a NxN diagonal positive definite matrix * W is a KxN low-rank correction * C is a KxK semidefinite diagonal factor of low-rank correction This preconditioner is exact but slow - it requires O(N*K^2) time to be built and O(N*K) time to be applied. Woodbury matrix identity is used to build inverse matrix. 
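In the notation used above (H=D+W'*C*W with diagonal D and C), the Woodbury identity applied here reads, assuming C is invertible:

    inv(H) = inv(D) - inv(D)*W'*inv( inv(C) + W*inv(D)*W' )*W*inv(D)

Only a KxK matrix has to be factorized, which is what yields the O(N*K^2) build cost and O(N*K) application cost quoted above.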
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetpreclowrankexact(minlbfgsstate* state, /* Real */ ae_vector* d, /* Real */ ae_vector* c, /* Real */ ae_matrix* w, ae_int_t cnt, ae_state *_state) { state->prectype = 5; preparelowrankpreconditioner(d, c, w, state->n, cnt, &state->lowrankbuf, _state); } /************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied gradient, and one which uses function value only and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF() for numerical differentiation) you should choose appropriate variant of MinLBFGSOptimize() - one which accepts function AND gradient or one which accepts function ONLY. Be careful to choose variant of MinLBFGSOptimize() which corresponds to your optimization scheme! Table below lists different combinations of callback (function/gradient) passed to MinLBFGSOptimize() and specific function used to create optimizer. | USER PASSED TO MinLBFGSOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ MinLBFGSCreateF() | work FAIL MinLBFGSCreate() | FAIL work Here "FAIL" denotes inappropriate combinations of optimizer creation function and MinLBFGSOptimize() version. Attemps to use such combination (for example, to create optimizer with MinLBFGSCreateF() and to pass gradient information to MinCGOptimize()) will lead to exception being thrown. Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed. -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ ae_bool minlbfgsiteration(minlbfgsstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t ic; ae_int_t mcinfo; double v; double vv; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
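*
* A hypothetical driver loop (a sketch only - the real wrappers are
* auto-generated) serves the requests raised by this function through the
* NeedFG/XUpdated flags (NeedF is requested instead of NeedFG when
* numerical differentiation is active):
*
*     while( minlbfgsiteration(state, _state) )
*     {
*         if( state->needfg )
*         {
*             ...evaluate target at state->x, store value to state->f and gradient to state->g...
*             continue;
*         }
*         if( state->xupdated )
*         {
*             ...optional progress reporting for the point in state->x...
*             continue;
*         }
*     }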
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; m = state->rstate.ia.ptr.p_int[1]; i = state->rstate.ia.ptr.p_int[2]; j = state->rstate.ia.ptr.p_int[3]; ic = state->rstate.ia.ptr.p_int[4]; mcinfo = state->rstate.ia.ptr.p_int[5]; v = state->rstate.ra.ptr.p_double[0]; vv = state->rstate.ra.ptr.p_double[1]; } else { n = 359; m = -58; i = -919; j = -909; ic = 81; mcinfo = 255; v = 74; vv = -788; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } /* * Routine body */ /* * Unload frequently used variables from State structure * (just for typing convinience) */ n = state->n; m = state->m; /* * Init */ state->userterminationneeded = ae_false; state->repterminationtype = 0; state->repiterationscount = 0; state->repnfev = 0; smoothnessmonitorinit(&state->smonitor, n, 1, state->smoothnessguardlevel>0, _state); rvectorsetlengthatleast(&state->invs, n, _state); for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; state->invs.ptr.p_double[i] = 1/state->s.ptr.p_double[i]; } /* * Check, that transferred derivative value is right */ state->stp = (double)(0); minlbfgs_clearrequestfields(state, _state); if( !(ae_fp_eq(state->diffstep,(double)(0))&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_15; } lbl_17: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xbase, &state->s, &state->s, &state->s, ae_false, state->teststep, _state) ) { goto lbl_18; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfg = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfg = ae_false; state->smonitor.fi.ptr.p_double[0] = state->f; for(i=0; i<=n-1; i++) { state->smonitor.j.ptr.pp_double[0][i] = state->g.ptr.p_double[i]; } goto lbl_17; lbl_18: lbl_15: /* * Calculate F/G at the initial point */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xbase.ptr.p_double[i]; } state->stp = (double)(0); minlbfgs_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_19; } state->needfg = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needfg = ae_false; goto lbl_20; lbl_19: state->needf = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->fbase = state->f; i = 0; lbl_21: if( i>n-1 ) { goto lbl_23; } v = state->x.ptr.p_double[i]; state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 5; goto lbl_rcomm; 
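/* * Note: stages 3..6 (lbl_3/lbl_4/lbl_5/lbl_6) implement a 4-point central difference: with per-variable step h=DiffStep*S[i], the gradient component is assembled at lbl_6 below as g[i] = (8*(f(x+h/2)-f(x-h/2))-(f(x+h)-f(x-h)))/(6*h). */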
lbl_5: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->fp2 = state->f; state->x.ptr.p_double[i] = v; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); i = i+1; goto lbl_21; lbl_23: state->f = state->fbase; state->needf = ae_false; lbl_20: trimprepare(state->f, &state->trimthreshold, _state); if( !state->xrep ) { goto lbl_24; } minlbfgs_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->xupdated = ae_false; lbl_24: if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; result = ae_false; return result; } state->repnfev = 1; state->fold = state->f; v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) { state->repterminationtype = 4; result = ae_false; return result; } /* * Choose initial step and direction. * Apply preconditioner, if we have something other than default. */ ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( state->prectype==0 ) { /* * Default preconditioner is used, but we can't use it before iterations will start */ v = ae_v_dotproduct(&state->g.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = ae_sqrt(v, _state); if( ae_fp_eq(state->stpmax,(double)(0)) ) { state->stp = ae_minreal(1.0/v, (double)(1), _state); } else { state->stp = ae_minreal(1.0/v, state->stpmax, _state); } } if( state->prectype==1 ) { /* * Cholesky preconditioner is used */ fblscholeskysolve(&state->denseh, 1.0, n, ae_true, &state->d, &state->autobuf, _state); state->stp = (double)(1); } if( state->prectype==2 ) { /* * diagonal approximation is used */ for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = state->d.ptr.p_double[i]/state->diagh.ptr.p_double[i]; } state->stp = (double)(1); } if( state->prectype==3 ) { /* * scale-based preconditioner is used */ for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = state->d.ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; } state->stp = (double)(1); } if( state->prectype==4 ) { /* * rank-k BFGS-based preconditioner is used */ inexactlbfgspreconditioner(&state->d, n, &state->precd, &state->precc, &state->precw, state->preck, &state->precbuf, _state); state->stp = (double)(1); } if( state->prectype==5 ) { /* * exact low-rank preconditioner is used */ applylowrankpreconditioner(&state->d, &state->lowrankbuf, _state); state->stp = (double)(1); } /* * Main cycle */ state->k = 0; lbl_26: if( ae_false ) { goto lbl_27; } /* * Main cycle: prepare to 1-D line search */ state->p = state->k%m; state->q = ae_minint(state->k, m-1, _state); /* * Store X[k], G[k] */ ae_v_move(&state->xp.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_moveneg(&state->sk.ptr.pp_double[state->p][0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_moveneg(&state->yk.ptr.pp_double[state->p][0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Minimize F(x+alpha*d) * Calculate S[k], Y[k] */ state->mcstage = 0; if( state->k!=0 ) { state->stp = 1.0; } linminnormalized(&state->d, &state->stp, n, _state); smoothnessmonitorstartlinesearch1u(&state->smonitor, &state->s, &state->invs, &state->x, state->f, &state->g, _state); mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, 
state->stpmax, minlbfgs_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); lbl_28: if( state->mcstage==0 ) { goto lbl_29; } minlbfgs_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_30; } state->needfg = ae_true; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->needfg = ae_false; goto lbl_31; lbl_30: state->needf = ae_true; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->fbase = state->f; i = 0; lbl_32: if( i>n-1 ) { goto lbl_34; } v = state->x.ptr.p_double[i]; state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->fp2 = state->f; state->x.ptr.p_double[i] = v; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); i = i+1; goto lbl_32; lbl_34: state->f = state->fbase; state->needf = ae_false; lbl_31: smoothnessmonitorenqueuepoint1u(&state->smonitor, &state->s, &state->invs, &state->d, state->stp, &state->x, state->f, &state->g, _state); trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->stpmax, minlbfgs_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); goto lbl_28; lbl_29: smoothnessmonitorfinalizelinesearch(&state->smonitor, _state); if( state->userterminationneeded ) { /* * User requested termination. * Restore previous point and return. 
*/ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xp.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } if( !state->xrep ) { goto lbl_35; } /* * report */ minlbfgs_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->xupdated = ae_false; lbl_35: state->repnfev = state->repnfev+state->nfev; state->repiterationscount = state->repiterationscount+1; ae_v_add(&state->sk.ptr.pp_double[state->p][0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->yk.ptr.pp_double[state->p][0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Stopping conditions */ v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( !ae_isfinite(v, _state)||!ae_isfinite(state->f, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; result = ae_false; return result; } if( state->repiterationscount>=state->maxits&&state->maxits>0 ) { /* * Too many iterations */ state->repterminationtype = 5; result = ae_false; return result; } if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) { /* * Gradient is small enough */ state->repterminationtype = 4; result = ae_false; return result; } if( ae_fp_less_eq(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) { /* * F(k+1)-F(k) is small enough */ state->repterminationtype = 1; result = ae_false; return result; } v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->sk.ptr.pp_double[state->p][i]/state->s.ptr.p_double[i], _state); } if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsx) ) { /* * X(k+1)-X(k) is small enough */ state->repterminationtype = 2; result = ae_false; return result; } /* * If Wolfe conditions are satisfied, we can update * limited memory model. * * However, if conditions are not satisfied (NFEV limit is met, * function is too wild, ...), we'll skip L-BFGS update */ if( mcinfo!=1 ) { /* * Skip update. * * In such cases we'll initialize search direction by * antigradient vector, because it leads to more * transparent code with less number of special cases */ state->fold = state->f; ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); } else { /* * Calculate Rho[k], GammaK */ v = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->sk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); vv = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->yk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); if( ae_fp_eq(v,(double)(0))||ae_fp_eq(vv,(double)(0)) ) { /* * Rounding errors make further iterations impossible. 
*/ state->repterminationtype = -2; result = ae_false; return result; } state->rho.ptr.p_double[state->p] = 1/v; state->gammak = v/vv; /* * Calculate d(k+1) = -H(k+1)*g(k+1) * * for I:=K downto K-Q do * V = s(i)^T * work(iteration:I) * theta(i) = V * work(iteration:I+1) = work(iteration:I) - V*Rho(i)*y(i) * work(last iteration) = H0*work(last iteration) - preconditioner * for I:=K-Q to K do * V = y(i)^T*work(iteration:I) * work(iteration:I+1) = work(iteration:I) +(-V+theta(i))*Rho(i)*s(i) * * NOW WORK CONTAINS d(k+1) */ ae_v_move(&state->work.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=state->k; i>=state->k-state->q; i--) { ic = i%m; v = ae_v_dotproduct(&state->sk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->theta.ptr.p_double[ic] = v; vv = v*state->rho.ptr.p_double[ic]; ae_v_subd(&state->work.ptr.p_double[0], 1, &state->yk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); } if( state->prectype==0 ) { /* * Simple preconditioner is used */ v = state->gammak; ae_v_muld(&state->work.ptr.p_double[0], 1, ae_v_len(0,n-1), v); } if( state->prectype==1 ) { /* * Cholesky preconditioner is used */ fblscholeskysolve(&state->denseh, (double)(1), n, ae_true, &state->work, &state->autobuf, _state); } if( state->prectype==2 ) { /* * diagonal approximation is used */ for(i=0; i<=n-1; i++) { state->work.ptr.p_double[i] = state->work.ptr.p_double[i]/state->diagh.ptr.p_double[i]; } } if( state->prectype==3 ) { /* * scale-based preconditioner is used */ for(i=0; i<=n-1; i++) { state->work.ptr.p_double[i] = state->work.ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; } } if( state->prectype==4 ) { /* * Rank-K BFGS-based preconditioner is used */ inexactlbfgspreconditioner(&state->work, n, &state->precd, &state->precc, &state->precw, state->preck, &state->precbuf, _state); } if( state->prectype==5 ) { /* * Exact low-rank preconditioner is used */ applylowrankpreconditioner(&state->work, &state->lowrankbuf, _state); } for(i=state->k-state->q; i<=state->k; i++) { ic = i%m; v = ae_v_dotproduct(&state->yk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); vv = state->rho.ptr.p_double[ic]*(-v+state->theta.ptr.p_double[ic]); ae_v_addd(&state->work.ptr.p_double[0], 1, &state->sk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); } ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Next step */ state->fold = state->f; state->k = state->k+1; } goto lbl_26; lbl_27: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = m; state->rstate.ia.ptr.p_int[2] = i; state->rstate.ia.ptr.p_int[3] = j; state->rstate.ia.ptr.p_int[4] = ic; state->rstate.ia.ptr.p_int[5] = mcinfo; state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = vv; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. 
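A minimal calling sequence looks as follows (just a sketch; variable names are illustrative, and it assumes that the problem and the user-supplied callback have already been set up with the usual create/optimize/results driver functions of this unit):

    minlbfgsoptguardgradient(&state, 0.001, _state);   // TestStep=0.001
    ... run optimization session as usual ...
    minlbfgsoptguardresults(&state, &ogrep, _state);
    if( ogrep.badgradsuspected )
    {
        // inspect ogrep.badgradvidx, ogrep.badgraduser, ogrep.badgradnum
    }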
After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minlbfgsoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minlbfgssetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardgradient(minlbfgsstate* state, double teststep, ae_state *_state) { ae_assert(ae_isfinite(teststep, _state), "MinLBFGSOptGuardGradient: TestStep contains NaN or INF", _state); ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinLBFGSOptGuardGradient: invalid argument TestStep(TestStep<0)", _state); state->teststep = teststep; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. 
It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching the solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardsmoothness(minlbfgsstate* state, ae_int_t level, ae_state *_state) { ae_assert(level==0||level==1, "MinLBFGSOptGuardSmoothness: unexpected value of level parameter", _state); state->smoothnessguardlevel = level; } /************************************************************************* Results of OptGuard integrity check; should be called after optimization session is over.
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minlbfgsoptguardgradient() for gradient verification * minlbfgsoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minlbfgsoptguardnonc1test0results() * minlbfgsoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardresults(minlbfgsstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. 
Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardnonc1test0results(minlbfgsstate* state, optguardnonc1test0report* strrep, optguardnonc1test0report* lngrep, ae_state *_state) { _optguardnonc1test0report_clear(strrep); _optguardnonc1test0report_clear(lngrep); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. 
* stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with the most likely position of the violation between stpidxa+1 and stpidxa+2). ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlbfgsoptguardnonc1test1results(minlbfgsstate* state, optguardnonc1test1report* strrep, optguardnonc1test1report* lngrep, ae_state *_state) { _optguardnonc1test1report_clear(strrep); _optguardnonc1test1report_clear(lngrep); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* L-BFGS algorithm results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report: * Rep.TerminationType completion code: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -2 rounding errors prevent further improvement. X contains best point found. * -1 incorrect parameters were specified * 1 relative function improvement is no more than EpsF. * 2 relative step is no more than EpsX. * 4 gradient norm is no more than EpsG * 5 MaxIts steps were taken * 7 stopping conditions are too stringent, further improvement is impossible * 8 terminated by user who called minlbfgsrequesttermination(). X contains point which was "current accepted" when termination request was submitted. * Rep.IterationsCount contains iterations count * NFEV contains number of function calculations -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgsresults(minlbfgsstate* state, /* Real */ ae_vector* x, minlbfgsreport* rep, ae_state *_state) { ae_vector_clear(x); _minlbfgsreport_clear(rep); minlbfgsresultsbuf(state, x, rep, _state); } /************************************************************************* L-BFGS algorithm results Buffered implementation of MinLBFGSResults which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes the buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 20.08.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgsresultsbuf(minlbfgsstate* state, /* Real */ ae_vector* x, minlbfgsreport* rep, ae_state *_state) { if( x->cnt<state->n ) { ae_vector_set_length(x, state->n, _state); } ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); rep->iterationscount = state->repiterationscount; rep->nfev = state->repnfev; rep->terminationtype = state->repterminationtype; } /************************************************************************* This subroutine restarts the LBFGS algorithm from a new point.
All optimization parameters are left unchanged. This function allows you to solve multiple optimization problems (which must have the same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used to store algorithm state X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgsrestartfrom(minlbfgsstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_assert(x->cnt>=state->n, "MinLBFGSRestartFrom: Length(X)<N!", _state); ae_assert(isfinitevector(x, state->n, _state), "MinLBFGSRestartFrom: X contains infinite or NaN values!", _state); ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); ae_vector_set_length(&state->rstate.ia, 5+1, _state); ae_vector_set_length(&state->rstate.ra, 1+1, _state); state->rstate.stage = -1; minlbfgs_clearrequestfields(state, _state); } /************************************************************************* This subroutine submits a request for termination of the running optimizer. It should be called from the user-supplied callback when the user decides that it is time to "smoothly" terminate the optimization process. As a result, the optimizer stops at the point which was "current accepted" when the termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after the request for termination the optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on an optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored.
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minlbfgsrequesttermination(minlbfgsstate* state, ae_state *_state) { state->userterminationneeded = ae_true; } /************************************************************************* Clears request fileds (to be sure that we don't forgot to clear something) *************************************************************************/ static void minlbfgs_clearrequestfields(minlbfgsstate* state, ae_state *_state) { state->needf = ae_false; state->needfg = ae_false; state->xupdated = ae_false; } void _minlbfgsstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minlbfgsstate *p = (minlbfgsstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rho, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->yk, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->sk, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->theta, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->denseh, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->precc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->precd, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->precw, 0, 0, DT_REAL, _state, make_automatic); _precbuflbfgs_init(&p->precbuf, _state, make_automatic); _precbuflowrank_init(&p->lowrankbuf, _state, make_automatic); ae_vector_init(&p->autobuf, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); _linminstate_init(&p->lstate, _state, make_automatic); _smoothnessmonitor_init(&p->smonitor, _state, make_automatic); ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic); } void _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minlbfgsstate *dst = (minlbfgsstate*)_dst; minlbfgsstate *src = (minlbfgsstate*)_src; dst->n = src->n; dst->m = src->m; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->xrep = src->xrep; dst->stpmax = src->stpmax; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); dst->diffstep = src->diffstep; dst->nfev = src->nfev; dst->mcstage = src->mcstage; dst->k = src->k; dst->q = src->q; dst->p = src->p; ae_vector_init_copy(&dst->rho, &src->rho, _state, make_automatic); ae_matrix_init_copy(&dst->yk, &src->yk, _state, make_automatic); ae_matrix_init_copy(&dst->sk, &src->sk, _state, make_automatic); ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic); ae_vector_init_copy(&dst->theta, &src->theta, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->stp = src->stp; ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic); dst->fold = src->fold; dst->trimthreshold = src->trimthreshold; ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); dst->prectype = src->prectype; dst->gammak = src->gammak; ae_matrix_init_copy(&dst->denseh, &src->denseh, _state, 
make_automatic); ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic); ae_vector_init_copy(&dst->precc, &src->precc, _state, make_automatic); ae_vector_init_copy(&dst->precd, &src->precd, _state, make_automatic); ae_matrix_init_copy(&dst->precw, &src->precw, _state, make_automatic); dst->preck = src->preck; _precbuflbfgs_init_copy(&dst->precbuf, &src->precbuf, _state, make_automatic); _precbuflowrank_init_copy(&dst->lowrankbuf, &src->lowrankbuf, _state, make_automatic); dst->fbase = src->fbase; dst->fm2 = src->fm2; dst->fm1 = src->fm1; dst->fp1 = src->fp1; dst->fp2 = src->fp2; ae_vector_init_copy(&dst->autobuf, &src->autobuf, _state, make_automatic); ae_vector_init_copy(&dst->invs, &src->invs, _state, make_automatic); ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->needf = src->needf; dst->needfg = src->needfg; dst->xupdated = src->xupdated; dst->userterminationneeded = src->userterminationneeded; dst->teststep = src->teststep; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); dst->repiterationscount = src->repiterationscount; dst->repnfev = src->repnfev; dst->repterminationtype = src->repterminationtype; _linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic); dst->smoothnessguardlevel = src->smoothnessguardlevel; _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic); ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic); } void _minlbfgsstate_clear(void* _p) { minlbfgsstate *p = (minlbfgsstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_vector_clear(&p->rho); ae_matrix_clear(&p->yk); ae_matrix_clear(&p->sk); ae_vector_clear(&p->xp); ae_vector_clear(&p->theta); ae_vector_clear(&p->d); ae_vector_clear(&p->work); ae_vector_clear(&p->xbase); ae_matrix_clear(&p->denseh); ae_vector_clear(&p->diagh); ae_vector_clear(&p->precc); ae_vector_clear(&p->precd); ae_matrix_clear(&p->precw); _precbuflbfgs_clear(&p->precbuf); _precbuflowrank_clear(&p->lowrankbuf); ae_vector_clear(&p->autobuf); ae_vector_clear(&p->invs); ae_vector_clear(&p->x); ae_vector_clear(&p->g); _rcommstate_clear(&p->rstate); _linminstate_clear(&p->lstate); _smoothnessmonitor_clear(&p->smonitor); ae_vector_clear(&p->lastscaleused); } void _minlbfgsstate_destroy(void* _p) { minlbfgsstate *p = (minlbfgsstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_vector_destroy(&p->rho); ae_matrix_destroy(&p->yk); ae_matrix_destroy(&p->sk); ae_vector_destroy(&p->xp); ae_vector_destroy(&p->theta); ae_vector_destroy(&p->d); ae_vector_destroy(&p->work); ae_vector_destroy(&p->xbase); ae_matrix_destroy(&p->denseh); ae_vector_destroy(&p->diagh); ae_vector_destroy(&p->precc); ae_vector_destroy(&p->precd); ae_matrix_destroy(&p->precw); _precbuflbfgs_destroy(&p->precbuf); _precbuflowrank_destroy(&p->lowrankbuf); ae_vector_destroy(&p->autobuf); ae_vector_destroy(&p->invs); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); _rcommstate_destroy(&p->rstate); _linminstate_destroy(&p->lstate); _smoothnessmonitor_destroy(&p->smonitor); ae_vector_destroy(&p->lastscaleused); } void _minlbfgsreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minlbfgsreport *p = (minlbfgsreport*)_p; ae_touch_ptr((void*)p); } void _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minlbfgsreport *dst = (minlbfgsreport*)_dst; minlbfgsreport *src = (minlbfgsreport*)_src; 
dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->terminationtype = src->terminationtype; } void _minlbfgsreport_clear(void* _p) { minlbfgsreport *p = (minlbfgsreport*)_p; ae_touch_ptr((void*)p); } void _minlbfgsreport_destroy(void* _p) { minlbfgsreport *p = (minlbfgsreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This function initializes QPDENSEAULSettings structure with default settings. Newly created structure MUST be initialized by default settings - or by copy of the already initialized structure. -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qpdenseaulloaddefaults(ae_int_t nmain, qpdenseaulsettings* s, ae_state *_state) { s->epsx = 1.0E-6; s->outerits = 5; s->rho = 100.0; } /************************************************************************* This function runs Dense-AUL solver; it returns after optimization process was completed. Following QP problem is solved: min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) subject to combination of box and general linear dense/sparse constraints. INPUT PARAMETERS: DenseA - for dense problems (AKind=0), A-term of CQM object contains system matrix. Other terms are unspecified and should not be referenced. SparseA - for sparse problems (AKind=1), CRS format AKind - sparse matrix format: * 0 for dense matrix * 1 for sparse matrix SparseUpper - which triangle of SparseAC stores matrix - upper or lower one (for dense matrices this parameter is not actual). B - linear term, array[N] BndL - lower bound, array[N] BndU - upper bound, array[N] S - scale vector, array[NC]: * I-th element contains scale of I-th variable, * SC[I]>0 XOrigin - origin term, array[NC]. Can be zero. N - number of variables in the original formulation (no slack variables). CLEIC - dense linear equality/inequality constraints. Equality constraints come first. NEC, NIC - number of dense equality/inequality constraints. SCLEIC - sparse linear equality/inequality constraints. Equality constraints come first. SNEC, SNIC - number of sparse equality/inequality constraints. RenormLC - whether constraints should be renormalized (recommended) or used "as is". Settings - QPDENSEAULSettings object initialized by one of the initialization functions. 
State - object which stores temporaries XS - initial point, array[NC] OUTPUT PARAMETERS: XS - last point TerminationType-termination type: * * * -- ALGLIB -- Copyright 2017 by Bochkanov Sergey *************************************************************************/ void qpdenseauloptimize(convexquadraticmodel* a, sparsematrix* sparsea, ae_int_t akind, ae_bool sparseaupper, /* Real */ ae_vector* b, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t nn, /* Real */ ae_matrix* cleic, ae_int_t dnec, ae_int_t dnic, sparsematrix* scleic, ae_int_t snec, ae_int_t snic, ae_bool renormlc, qpdenseaulsettings* settings, qpdenseaulbuffers* state, /* Real */ ae_vector* xs, /* Real */ ae_vector* lagbc, /* Real */ ae_vector* laglc, ae_int_t* terminationtype, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; double v; double vv; double rho; double epsx; ae_int_t outeridx; ae_int_t nmain; ae_int_t nslack; ae_int_t ntotal; ae_int_t ktotal; double maxrho; double feaserr; double feaserrprev; double requestedfeasdecrease; ae_int_t goodcounter; ae_int_t stagnationcounter; ae_int_t nectotal; ae_int_t nictotal; ae_int_t nicwork; ae_int_t kwork; ae_int_t nwork; ae_bool allowwseviction; ae_bool workingsetextended; double targetscale; *terminationtype = 0; nmain = nn; nslack = dnic+snic; ntotal = nmain+nslack; nectotal = dnec+snec; nictotal = dnic+snic; ktotal = dnec+dnic+snec+snic; rho = settings->rho; epsx = settings->epsx; requestedfeasdecrease = 0.33; maxrho = 1.0E12; if( ae_fp_less_eq(epsx,(double)(0)) ) { epsx = 1.0E-9; } /* * Integrity checks */ if( snec+snic>0 ) { ae_assert(scleic->matrixtype==1, "QPDENSEAULOptimize: unexpected sparse matrix format", _state); ae_assert(scleic->m==snec+snic, "QPDENSEAULOptimize: unexpected sparse matrix size", _state); ae_assert(scleic->n==nmain+1, "QPDENSEAULOptimize: unexpected sparse matrix size", _state); } /* * Prepare */ state->repinneriterationscount = 0; state->repouteriterationscount = 0; state->repncholesky = 0; state->repnmv = 0; state->repnwrkchanges = 0; state->repnwrk0 = 0; state->repnwrk1 = 0; state->repnwrkf = 0; *terminationtype = 0; ivectorsetlengthatleast(&state->cidx, ktotal, _state); rvectorsetlengthatleast(&state->nulc, ktotal, _state); rvectorsetlengthatleast(&state->nulcest, ktotal, _state); rvectorsetlengthatleast(&state->exb, ntotal, _state); rvectorsetlengthatleast(&state->exxc, ntotal, _state); rvectorsetlengthatleast(&state->exxorigin, ntotal, _state); rvectorsetlengthatleast(&state->exbndl, ntotal, _state); rvectorsetlengthatleast(&state->exbndu, ntotal, _state); rvectorsetlengthatleast(&state->exscale, ntotal, _state); rvectorsetlengthatleast(&state->tmp0, ntotal, _state); rvectorsetlengthatleast(&state->nicerr, nictotal, _state); ivectorsetlengthatleast(&state->nicnact, nictotal, _state); /* * Allocate Lagrange multipliers, fill by default values (zeros) */ rvectorsetlengthatleast(lagbc, nmain, _state); rvectorsetlengthatleast(laglc, ktotal, _state); for(i=0; i<=nmain-1; i++) { lagbc->ptr.p_double[i] = 0.0; } for(i=0; i<=ktotal-1; i++) { laglc->ptr.p_double[i] = 0.0; } /* * Prepare scaled/shifted model in dense format - input parameters * are converted and stored in State.SclSftA/B/HasBndL/HasBndU/BndL/BndU/CLEIC/XC/CScales */ qpdenseaulsolver_scaleshiftoriginalproblem(a, sparsea, akind, sparseaupper, b, bndl, bndu, s, xorigin, nmain, cleic, dnec, dnic, scleic, snec, snic, renormlc, state, xs, _state); /* * Normalize model in such way that norm(A)~1 (very roughly) * * 
We have two lower bounds for sigma_max(A): * * first estimate is provided by Frobenius norm, it is equal to ANorm/NMain * * second estimate is provided by max(CAC) * * We select the largest one of these estimates, because using just one * of them is prone to different failure modes. Then, we divide A and B * by this estimate. */ targetscale = qpdenseaulsolver_normalizequadraticterm(&state->sclsfta, &state->sclsftb, nmain, &state->sclsftcleic, nectotal, nictotal, renormlc, &state->tmp2, _state); /* * Select working set of inequality constraints. * * Although it is possible to process all inequality constraints * at once, in one large batch, some QP problems have NIC>>N constraints, * but only a minor fraction of them is active in the solution. * * Because algorithm running time is O((N+NEC+NIC)^3), we can * save a lot of time if we process only those inequality constraints * which need activation. Generally, NEC<N and only a small subset of the * inequality constraints has to be activated, so we start from a limited * initial working set and extend it during the outer iteration. */ qpdenseaulsolver_selectinitialworkingset(&state->sclsfta, nmain, &state->sclsftcleic, nectotal, nictotal, &state->tmp0, &state->tmp2, &nicwork, &allowwseviction, _state); kwork = nectotal+nicwork; nwork = nmain+nicwork; state->repnwrk0 = nicwork; for(i=0; i<=nicwork-1; i++) { state->nicnact.ptr.p_int[i] = 1; } for(i=nicwork; i<=nictotal-1; i++) { state->nicnact.ptr.p_int[i] = 0; } for(i=0; i<=ktotal-1; i++) { state->cidx.ptr.p_int[i] = i; } /* * Perform outer iteration */ for(i=0; i<=ktotal-1; i++) { state->nulc.ptr.p_double[i] = (double)(0); } for(i=0; i<=ntotal-1; i++) { state->exscale.ptr.p_double[i] = 1.0; state->exxorigin.ptr.p_double[i] = 0.0; } qpdenseaulsolver_generateexinitialpoint(&state->sclsftxc, nmain, nslack, &state->exxc, _state); goodcounter = 0; stagnationcounter = 0; feaserr = ae_maxrealnumber; for(outeridx=0; outeridx<=settings->outerits-1; outeridx++) { /* * Repeat loop until working set stabilizes. */ do { /* * Preallocate space for ExA and for QQP solver; we do not allocate * array[NTotal,NTotal] from the start because NTotal can be much * larger than NMain for problems with large amount of inequality * constraints, and we usually need NWork=O(NMain). * * NOTE: for the sake of simplicity, 1-dimensional arrays were * preallocated to the maximum size required (NTotal). */ if( state->exa.rows<nwork||state->exa.cols<nwork ) { i = 2*nwork; rmatrixsetlengthatleast(&state->exa, i, i, _state); } qqppreallocategrowdense(&state->qqpbuf, nwork, i, _state); /* * Generate penalized quadratic model */ qpdenseaulsolver_generateexmodel(&state->sclsfta, &state->sclsftb, nmain, &state->sclsftbndl, &state->sclsfthasbndl, &state->sclsftbndu, &state->sclsfthasbndu, &state->sclsftcleic, nectotal, nicwork, &state->nulc, rho, &state->exa, &state->exb, &state->exbndl, &state->exbndu, &state->tmp2, _state); /* * Solve extended QP problem subject to current working set of general * inequality constraints.
*/ qqploaddefaults(nwork, &state->qqpsettingsuser, _state); state->qqpsettingsuser.maxouterits = 50; state->qqpsettingsuser.epsg = 0.0; state->qqpsettingsuser.epsf = 0.0; state->qqpsettingsuser.epsx = 0.01*epsx; state->qqpsettingsuser.cnphase = ae_true; qqpoptimize(&state->dummycqm, &state->dummysparse, &state->exa, 2, ae_true, &state->exb, &state->exbndl, &state->exbndu, &state->exscale, &state->exxorigin, nwork, &state->qqpsettingsuser, &state->qqpbuf, &state->exxc, &k, _state); state->repncholesky = state->repncholesky+state->qqpbuf.repncholesky; /* * Evaluate violation of constraints */ for(i=0; i<=nictotal-1; i++) { v = ae_v_dotproduct(&state->sclsftcleic.ptr.pp_double[nectotal+i][0], 1, &state->exxc.ptr.p_double[0], 1, ae_v_len(0,nmain-1)); v = v-state->sclsftcleic.ptr.pp_double[nectotal+i][nmain]; state->nicerr.ptr.p_double[i] = v; } /* * Working set expansion: * * select limited amount of most violated constraints * * perform permutation of non-work constraints such that * candidate constraint is first in the list (update XC and NuLC) * * increase working set size by 1 * * increase activation count for new constraint by 1 (this count * is used later by working set eviction phase) * * repeat * * NOTE: we use selection sort algorithm because its O(NAdded*NWork) cost * is still comparable to the cost of constraints evaluation */ workingsetextended = ae_false; i = 0; while(ae_fp_less((double)(i),1+qpdenseaulsolver_expansionratio*nmain)&&nicwork<nictotal) { /* * Select the most violated constraint among non-working ones */ k = nicwork; for(j=nicwork; j<=nictotal-1; j++) { if( ae_fp_greater(state->nicerr.ptr.p_double[j],state->nicerr.ptr.p_double[k]) ) { k = j; } } /* * If violation is positive, add it */ if( ae_fp_greater(state->nicerr.ptr.p_double[k],(double)(0)) ) { swaprows(&state->sclsftcleic, nectotal+nicwork, nectotal+k, -1, _state); swapelements(&state->nicerr, nicwork, k, _state); swapelementsi(&state->nicnact, nicwork, k, _state); swapelementsi(&state->cidx, nectotal+nicwork, nectotal+k, _state); swapelements(&state->cscales, nectotal+nicwork, nectotal+k, _state); state->exxc.ptr.p_double[nmain+nicwork] = 0.0; state->nulc.ptr.p_double[nectotal+nicwork] = 0.0; state->nicnact.ptr.p_int[nicwork] = state->nicnact.ptr.p_int[nicwork]+1; inc(&nicwork, _state); inc(&nwork, _state); inc(&kwork, _state); inc(&i, _state); workingsetextended = ae_true; } else { break; } } /* * Working set eviction: * * select constraints which are (1) far away from the * boundary, AND (2) have less than two activation attempts * (if constraint is regularly activated/deactivated, we keep * it in the working set no matter what) * * remove such constraints from the working set one by one */ if( allowwseviction ) { for(k=nicwork-1; k>=0; k--) { if( ae_fp_less(state->nicerr.ptr.p_double[k],qpdenseaulsolver_evictionlevel)&&state->nicnact.ptr.p_int[k]<=1 ) { swaprows(&state->sclsftcleic, nectotal+nicwork-1, nectotal+k, -1, _state); swapelementsi(&state->cidx, nectotal+nicwork-1, nectotal+k, _state); swapelements(&state->cscales, nectotal+nicwork-1, nectotal+k, _state); swapelements(&state->nicerr, nicwork-1, k, _state); swapelementsi(&state->nicnact, nicwork-1, k, _state); swapelements(&state->exxc, nmain+nicwork-1, nmain+k, _state); swapelements(&state->nulc, nectotal+nicwork-1, nectotal+k, _state); dec(&nicwork, _state); dec(&nwork, _state); dec(&kwork, _state); } } } /* * Report working set statistics */ if( state->repnwrk1==0 ) { state->repnwrk1 = nicwork; } state->repnwrkf = nicwork; if( workingsetextended ) { inc(&state->repnwrkchanges, _state); } } while(workingsetextended); /* * Estimate Lagrange multipliers using alternative algorithm */
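/*
 * The "alternative algorithm" mentioned above is implemented by
 * qpdenseaulsolver_updatelagrangemultipliers() below: box constraints which
 * are active at ExXC are frozen, and the resulting equality-constrained KKT
 * system is solved with a regularized QR factorization; NuLCEst is
 * overwritten with the refined multiplier estimates.
 */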
ae_v_move(&state->nulcest.ptr.p_double[0], 1, &state->nulc.ptr.p_double[0], 1, ae_v_len(0,kwork-1)); qpdenseaulsolver_updatelagrangemultipliers(&state->sclsfta, &state->sclsftb, nmain, &state->sclsftbndl, &state->sclsfthasbndl, &state->sclsftbndu, &state->sclsfthasbndu, &state->sclsftcleic, nectotal, nicwork, &state->exxc, &state->nulcest, state, _state); /* * Update XC and Lagrange multipliers */ feaserrprev = feaserr; feaserr = (double)(0); for(i=0; i<=kwork-1; i++) { /* * Calculate I-th feasibility error in V using formula for distance * between point and line (here we calculate actual distance between * XN and hyperplane Ci'*XN=Bi, which is different from error Ci'*XN-Bi). */ v = (double)(0); vv = (double)(0); for(j=0; j<=nmain-1; j++) { v = v+state->sclsftcleic.ptr.pp_double[i][j]*state->exxc.ptr.p_double[j]; vv = vv+ae_sqr(state->sclsftcleic.ptr.pp_double[i][j], _state); } if( i>=nectotal ) { v = v+state->exxc.ptr.p_double[nmain+(i-nectotal)]; vv = vv+ae_sqr((double)(1), _state); } v = v-state->sclsftcleic.ptr.pp_double[i][nmain]; vv = coalesce(vv, (double)(1), _state); v = v/ae_sqrt(vv, _state); /* * Calculate magnitude of Lagrangian update (and Lagrangian parameters themselves) */ feaserr = feaserr+ae_sqr(v, _state); state->nulc.ptr.p_double[i] = state->nulcest.ptr.p_double[i]; } feaserr = ae_sqrt(feaserr, _state); if( ae_fp_less(feaserr,epsx) ) { inc(&goodcounter, _state); } else { goodcounter = 0; } if( ae_fp_greater(feaserr,feaserrprev*requestedfeasdecrease) ) { inc(&stagnationcounter, _state); } else { stagnationcounter = 0; } if( goodcounter>=2 ) { break; } if( stagnationcounter>=2 ) { rho = ae_minreal(rho*10.0, maxrho, _state); } else { rho = ae_minreal(rho*1.41, maxrho, _state); } } /* * Convert Lagrange multipliers from internal format to one expected * by caller: * * reorder multipliers for linear constraints * * compute residual from gradient+linearconstraints * * compute multipliers for box constraints from residual * * rescale everything */ for(i=0; i<=nectotal+nicwork-1; i++) { laglc->ptr.p_double[state->cidx.ptr.p_int[i]] = -state->nulc.ptr.p_double[i]*targetscale/state->cscales.ptr.p_double[i]; } rvectorsetlengthatleast(&state->tmpg, nmain, _state); for(i=0; i<=nmain-1; i++) { v = state->sclsftb.ptr.p_double[i]; for(j=0; j<=nmain-1; j++) { v = v+state->sclsfta.ptr.pp_double[i][j]*state->exxc.ptr.p_double[j]; } state->tmpg.ptr.p_double[i] = v; } rmatrixgemv(nmain, nectotal+nicwork, -1.0, &state->sclsftcleic, 0, 0, 1, &state->nulc, 0, 1.0, &state->tmpg, 0, _state); for(i=0; i<=nmain-1; i++) { if( (state->sclsfthasbndl.ptr.p_bool[i]&&ae_fp_eq(state->exxc.ptr.p_double[i],state->sclsftbndl.ptr.p_double[i]))||(state->sclsfthasbndu.ptr.p_bool[i]&&ae_fp_eq(state->exxc.ptr.p_double[i],state->sclsftbndu.ptr.p_double[i])) ) { lagbc->ptr.p_double[i] = -state->tmpg.ptr.p_double[i]; } } for(i=0; i<=nmain-1; i++) { lagbc->ptr.p_double[i] = lagbc->ptr.p_double[i]*targetscale/s->ptr.p_double[i]; } /* * Unpack results. * * Add XOrigin to XC and make sure that boundary constraints are * satisfied. */ for(i=0; i<=nmain-1; i++) { /* * Unscale/unshift */ xs->ptr.p_double[i] = s->ptr.p_double[i]*state->exxc.ptr.p_double[i]+xorigin->ptr.p_double[i]; /* * Make sure that point is feasible w.r.t. box constraints. * Enforce box constraints which were active in the scaled/shifted solution. 
*/ if( state->sclsfthasbndl.ptr.p_bool[i] ) { if( ae_fp_less(xs->ptr.p_double[i],bndl->ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndl->ptr.p_double[i]; } if( ae_fp_eq(state->exxc.ptr.p_double[i],state->sclsftbndl.ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndl->ptr.p_double[i]; } } if( state->sclsfthasbndu.ptr.p_bool[i] ) { if( ae_fp_greater(xs->ptr.p_double[i],bndu->ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndu->ptr.p_double[i]; } if( ae_fp_eq(state->exxc.ptr.p_double[i],state->sclsftbndu.ptr.p_double[i]) ) { xs->ptr.p_double[i] = bndu->ptr.p_double[i]; } } } *terminationtype = 2; } /************************************************************************* This function generates box-constrained QP problem, which is penalized and augmented formulation of original linearly constrained problem -- ALGLIB -- Copyright 23.02.2017 by Bochkanov Sergey *************************************************************************/ static void qpdenseaulsolver_generateexmodel(/* Real */ ae_matrix* sclsfta, /* Real */ ae_vector* sclsftb, ae_int_t nmain, /* Real */ ae_vector* sclsftbndl, /* Boolean */ ae_vector* sclsfthasbndl, /* Real */ ae_vector* sclsftbndu, /* Boolean */ ae_vector* sclsfthasbndu, /* Real */ ae_matrix* sclsftcleic, ae_int_t sclsftnec, ae_int_t sclsftnic, /* Real */ ae_vector* nulc, double rho, /* Real */ ae_matrix* exa, /* Real */ ae_vector* exb, /* Real */ ae_vector* exbndl, /* Real */ ae_vector* exbndu, /* Real */ ae_matrix* tmp2, ae_state *_state) { ae_int_t nslack; ae_int_t ntotal; ae_int_t i; ae_int_t j; double v; nslack = sclsftnic; ntotal = nmain+nslack; /* * Integrity check for properly preallocated storage */ ae_assert(exa->rows>=ntotal&&exa->cols>=ntotal, "QPDenseAUL.GenerateExModel - integrity check failed", _state); ae_assert((exb->cnt>=ntotal&&exbndl->cnt>=ntotal)&&exbndu->cnt>=ntotal, "QPDenseAUL.GenerateExModel - integrity check failed", _state); /* * Primary quadratic term */ for(i=0; i<=ntotal-1; i++) { for(j=i; j<=ntotal-1; j++) { exa->ptr.pp_double[i][j] = (double)(0); } } for(i=0; i<=nmain-1; i++) { for(j=i; j<=nmain-1; j++) { exa->ptr.pp_double[i][j] = sclsfta->ptr.pp_double[i][j]; } } /* * Primary linear term */ for(i=0; i<=ntotal-1; i++) { exb->ptr.p_double[i] = (double)(0); } for(i=0; i<=nmain-1; i++) { exb->ptr.p_double[i] = sclsftb->ptr.p_double[i]; } /* * Box constraints - move primary, add slack */ for(i=0; i<=nmain-1; i++) { if( sclsfthasbndl->ptr.p_bool[i] ) { exbndl->ptr.p_double[i] = sclsftbndl->ptr.p_double[i]; } else { exbndl->ptr.p_double[i] = _state->v_neginf; } if( sclsfthasbndu->ptr.p_bool[i] ) { exbndu->ptr.p_double[i] = sclsftbndu->ptr.p_double[i]; } else { exbndu->ptr.p_double[i] = _state->v_posinf; } } for(i=nmain; i<=ntotal-1; i++) { exbndl->ptr.p_double[i] = (double)(0); exbndu->ptr.p_double[i] = _state->v_posinf; } /* * Handle equality constraints: * * modify quadratic term * * modify linear term * * add Lagrangian term */ rmatrixsetlengthatleast(tmp2, sclsftnec+sclsftnic, ntotal, _state); for(i=0; i<=sclsftnec+sclsftnic-1; i++) { /* * Given constraint row ci and right hand side ri, * I-th quadratic constraint adds penalty term * * 0.5*Rho*(ci'*x-ri)^2 = * = 0.5*Rho*(ci'*x-ri)^T*(ci'*x-ri) = * = 0.5*Rho*(x'*ci-ri')*(ci'*x-ri) = * = 0.5*Rho*(x'*ci*ci'*x - ri'*ci'*x - x'*ci*ri + ri'*ri ) * = 0.5*Rho*(x'*(ci*ci')*x - 2*ri*(ci'*x) + ri^2 ) * * Thus, quadratic term is updated by * * 0.5*Rho*(ci*ci') * * (with actual update to ExA being performed without 0.5 * multiplier because entire matrix is post-multipliead by 0.5) * and linear term 
receives update * * -Rho*ri*ci * * Similarly, the Lagrangian term is -NUi*(ci'*x-ri), * so linear term is updated by * * -NUi*ci * * Because our model does not take into account constant term, * we calculate just quadratic and linear terms. */ ae_v_move(&tmp2->ptr.pp_double[i][0], 1, &sclsftcleic->ptr.pp_double[i][0], 1, ae_v_len(0,nmain-1)); for(j=nmain; j<=ntotal-1; j++) { tmp2->ptr.pp_double[i][j] = (double)(0); } if( i>=sclsftnec ) { tmp2->ptr.pp_double[i][nmain+i-sclsftnec] = 1.0; } v = -rho*sclsftcleic->ptr.pp_double[i][nmain]; ae_v_addd(&exb->ptr.p_double[0], 1, &tmp2->ptr.pp_double[i][0], 1, ae_v_len(0,ntotal-1), v); v = -nulc->ptr.p_double[i]; ae_v_addd(&exb->ptr.p_double[0], 1, &tmp2->ptr.pp_double[i][0], 1, ae_v_len(0,ntotal-1), v); } rmatrixsyrk(ntotal, sclsftnec+sclsftnic, rho, tmp2, 0, 0, 2, 1.0, exa, 0, 0, ae_true, _state); } /************************************************************************* This function generates initial point for "extended" box-constrained QP problem. -- ALGLIB -- Copyright 23.02.2017 by Bochkanov Sergey *************************************************************************/ static void qpdenseaulsolver_generateexinitialpoint(/* Real */ ae_vector* sclsftxc, ae_int_t nmain, ae_int_t nslack, /* Real */ ae_vector* exxc, ae_state *_state) { ae_int_t ntotal; ae_int_t i; ntotal = nmain+nslack; for(i=0; i<=ntotal-1; i++) { exxc->ptr.p_double[i] = (double)(0); } for(i=0; i<=nmain-1; i++) { exxc->ptr.p_double[i] = sclsftxc->ptr.p_double[i]; } } /************************************************************************* This function estimates Lagrange multipliers for scaled-shifted QP problem (here "scaled-shifted" means that we performed variable scaling and subtracted origin) given by quadratic term A, linear term B, box constraints and linear constraint matrix. It is assumed that all linear constraints are equality ones, with first NEC ones being constraints without slack variables, and next NIC ones having slack variables. The only inequality constraints we have are box ones, with first NMain ones being "general" box constraints, and next NIC ones being non-negativity constraints (not specified explicitly). We also make use of the current point XC, which is used to determine active box constraints. Actual QP problem size is NMain+NIC, but some parameters have lower dimensionality. Parameter sizes are: * A is assumed to be array[NMain,NMain] * B is assumed to be array[NMain] * BndL, BndU are array[NMain] * CLEIC is array[NEC+NIC,NMain+1] (last item in a row contains right part) * ExXC is array[NMain+NIC], holds current point * NuLCEst is array[NEC+NIC], holds initial values of Lagrange coeffs On exit NuLCEst is updated with new estimate of Lagrange multipliers.
-- ALGLIB -- Copyright 23.02.2017 by Bochkanov Sergey *************************************************************************/ static void qpdenseaulsolver_updatelagrangemultipliers(/* Real */ ae_matrix* sclsfta, /* Real */ ae_vector* sclsftb, ae_int_t nmain, /* Real */ ae_vector* sclsftbndl, /* Boolean */ ae_vector* sclsfthasbndl, /* Real */ ae_vector* sclsftbndu, /* Boolean */ ae_vector* sclsfthasbndu, /* Real */ ae_matrix* sclsftcleic, ae_int_t sclsftnec, ae_int_t sclsftnic, /* Real */ ae_vector* exxc, /* Real */ ae_vector* nulcest, qpdenseaulbuffers* buffers, ae_state *_state) { ae_int_t nslack; ae_int_t ntotal; ae_int_t ktotal; ae_int_t nqrrows; ae_int_t nqrcols; ae_int_t i; ae_int_t j; double lambdareg; double mxdiag; double v; ae_bool isactive; nslack = sclsftnic; ntotal = nmain+nslack; ktotal = sclsftnec+sclsftnic; /* * Given current point ExXC, we can determine active and inactive * constraints. After we drop inactive inequality constraints, we * have equality-only constrained QP problem, with mix of general * linear equality constraints and "simple" constraints Xi=Ci. * * Problem min(0.5*x'*A*x + b'*x) s.t. C*x=d (general linear * constraints) can be solved by explicitly writing out Lagrange * equations: * * [ A C' ] [ X ] [ -b] * [ ] [ ] = [ ] * [ C ] [ L ] [ d ] * * or * * [ X ] * A1* [ ] = b1 * [ L ] * * where X stands for solution itself, and L stands for Lagrange * multipliers. It can be easily solved with direct linear solver. * However, such formulation does not account for "simple" equality * constraints on variables. It is possible to include "simple" * constraints into "general" ones (i.e. append (0 ... 0 -1 0 ... 0)' * to the constraint matrix), but it will increase problem * size. * * Another approach is to use initial values of X and L (X0 and L0) * as starting point, and to solve for "offset" from (X0, L0): * * [ X0+X1 ] * A1*[ ] = b1 * [ L0+L1 ] * * or * * [ X1 ] [ X0 ] * A1*[ ] = b1 - A1*[ ] * [ L1 ] [ L0 ] * * In such formulation components of X1 which correspond to active * constraints on variables are "frozen" at value 0 (because we have * equality constraint, offset from constrained value have to be zero). * * Thus, we can rewrite corresponding columns of A1 with zeros - and * use this space to store (0 ... 0 -1 0 ... 0)', which is used to * account for Lagrange multipliers for "simple" constraints. */ nqrcols = ntotal+ktotal; nqrrows = nqrcols; rvectorsetlengthatleast(&buffers->qrsv0, nqrcols, _state); rvectorsetlengthatleast(&buffers->qrsvx1, nqrcols, _state); for(i=0; i<=ntotal-1; i++) { buffers->qrsv0.ptr.p_double[i] = exxc->ptr.p_double[i]; } for(i=0; i<=ktotal-1; i++) { buffers->qrsv0.ptr.p_double[ntotal+i] = nulcest->ptr.p_double[i]; } rmatrixsetlengthatleast(&buffers->qrkkt, nqrcols+nqrcols, nqrcols+1, _state); rvectorsetlengthatleast(&buffers->qrrightpart, nqrcols+nqrcols, _state); lambdareg = 1.0E-8; for(;;) { /* * Initialize matrix A1 and right part b1 with zeros */ for(i=0; i<=buffers->qrkkt.rows-1; i++) { for(j=0; j<=buffers->qrkkt.cols-1; j++) { buffers->qrkkt.ptr.pp_double[i][j] = (double)(0); } buffers->qrrightpart.ptr.p_double[i] = (double)(0); } /* * Append quadratic term (note: we implicitly add NSlack zeros to * A and b). 
*/ mxdiag = (double)(0); for(i=0; i<=nmain-1; i++) { for(j=0; j<=nmain-1; j++) { buffers->qrkkt.ptr.pp_double[i][j] = sclsfta->ptr.pp_double[i][j]; } buffers->qrrightpart.ptr.p_double[i] = -sclsftb->ptr.p_double[i]; mxdiag = ae_maxreal(mxdiag, ae_fabs(sclsfta->ptr.pp_double[i][i], _state), _state); } mxdiag = coalesce(mxdiag, (double)(1), _state); /* * Append general linear constraints */ for(i=0; i<=ktotal-1; i++) { for(j=0; j<=nmain-1; j++) { buffers->qrkkt.ptr.pp_double[ntotal+i][j] = -sclsftcleic->ptr.pp_double[i][j]; buffers->qrkkt.ptr.pp_double[j][ntotal+i] = -sclsftcleic->ptr.pp_double[i][j]; } if( i>=sclsftnec ) { buffers->qrkkt.ptr.pp_double[ntotal+i][nmain+(i-sclsftnec)] = (double)(-1); buffers->qrkkt.ptr.pp_double[nmain+(i-sclsftnec)][ntotal+i] = (double)(-1); } buffers->qrrightpart.ptr.p_double[ntotal+i] = -sclsftcleic->ptr.pp_double[i][nmain]; } /* * Append regularizer to the bottom of the matrix * (it will be factored in during QR decomposition) */ if( ae_fp_greater(lambdareg,(double)(0)) ) { nqrrows = nqrcols+nqrcols; for(i=0; i<=nqrcols-1; i++) { buffers->qrkkt.ptr.pp_double[nqrcols+i][i] = lambdareg*mxdiag; } } /* * Subtract reference point (X0,L0) from the system */ for(i=0; i<=nqrcols-1; i++) { v = ae_v_dotproduct(&buffers->qrkkt.ptr.pp_double[i][0], 1, &buffers->qrsv0.ptr.p_double[0], 1, ae_v_len(0,nqrcols-1)); buffers->qrrightpart.ptr.p_double[i] = buffers->qrrightpart.ptr.p_double[i]-v; } /* * Handle active "simple" equality constraints */ for(i=0; i<=ntotal-1; i++) { isactive = ae_false; if( i<nmain&&((sclsfthasbndl->ptr.p_bool[i]&&ae_fp_eq(exxc->ptr.p_double[i],sclsftbndl->ptr.p_double[i]))||(sclsfthasbndu->ptr.p_bool[i]&&ae_fp_eq(exxc->ptr.p_double[i],sclsftbndu->ptr.p_double[i]))) ) { isactive = ae_true; } if( i>=nmain&&ae_fp_eq(exxc->ptr.p_double[i],0.0) ) { isactive = ae_true; } if( !isactive ) { continue; } for(j=0; j<=nqrrows-1; j++) { buffers->qrkkt.ptr.pp_double[j][i] = (double)(0); } buffers->qrkkt.ptr.pp_double[i][i] = (double)(-1); } /* * Solve via QR decomposition: * * append right part to the system matrix * * perform QR decomposition of the extended matrix (right part is implicitly * multiplied by Q during decomposition; believe me, it works!) * * check condition number, increase regularization value if necessary and retry * * solve triangular system, break iteration */ for(i=0; i<=nqrrows-1; i++) { buffers->qrkkt.ptr.pp_double[i][nqrcols] = buffers->qrrightpart.ptr.p_double[i]; } rmatrixqr(&buffers->qrkkt, nqrrows, nqrcols+1, &buffers->qrtau, _state); if( ae_fp_less_eq(rmatrixtrrcond1(&buffers->qrkkt, nqrcols, ae_true, ae_false, _state),1000*ae_machineepsilon) ) { lambdareg = coalesce(10*lambdareg, 1.0E-13, _state); continue; } for(i=nqrcols-1; i>=0; i--) { v = buffers->qrkkt.ptr.pp_double[i][nqrcols]; for(j=i+1; j<=nqrcols-1; j++) { v = v-buffers->qrkkt.ptr.pp_double[i][j]*buffers->qrsvx1.ptr.p_double[j]; } buffers->qrsvx1.ptr.p_double[i] = v/buffers->qrkkt.ptr.pp_double[i][i]; } break; } /* * Update Lagrange coefficients */ for(i=0; i<=ktotal-1; i++) { nulcest->ptr.p_double[i] = buffers->qrsv0.ptr.p_double[ntotal+i]+buffers->qrsvx1.ptr.p_double[ntotal+i]; } } /************************************************************************* This function generates scaled (by S) and shifted (by XC) reformulation of the original problem. INPUT PARAMETERS: DenseA - for dense problems (AKind=0), A-term of CQM object contains system matrix. Other terms are unspecified and should not be referenced.
SparseA - for sparse problems (AKind=1), CRS format AKind - sparse matrix format: * 0 for dense matrix * 1 for sparse matrix SparseUpper - which triangle of SparseAC stores matrix - upper or lower one (for dense matrices this parameter is not actual). B - linear term, array[N] BndL - lower bound, array[N] BndU - upper bound, array[N] S - scale vector, array[NC]: * I-th element contains scale of I-th variable, * SC[I]>0 XOrigin - origin term, array[NC]. Can be zero. N - number of variables in the original formulation (no slack variables). CLEIC - dense linear equality/inequality constraints. Equality constraints come first. NEC, NIC - number of dense equality/inequality constraints. SCLEIC - sparse linear equality/inequality constraints. Equality constraints come first. SNEC, SNIC - number of sparse equality/inequality constraints. RenormLC - whether constraints should be renormalized (recommended) or used "as is". Settings - QPDENSEAULSettings object initialized by one of the initialization functions. State - object which stores temporaries XS - initial point, array[NC] On output, following fields of the State structure are modified: * SclSftA - array[NMain,NMain], quadratic term, both triangles * SclSftB - array[NMain], linear term * SclSftXC - array[NMain], initial point * SclSftHasBndL, SclSftHasBndU, SclSftBndL, SclSftBndU - array[NMain], lower/upper bounds * SclSftCLEIC - array[KTotal,NMain+1], general linear constraints NOTE: State.Tmp2 is used to store temporary array[NMain,NMain] -- ALGLIB -- Copyright 01.10.2017 by Bochkanov Sergey *************************************************************************/ static void qpdenseaulsolver_scaleshiftoriginalproblem(convexquadraticmodel* a, sparsematrix* sparsea, ae_int_t akind, ae_bool sparseaupper, /* Real */ ae_vector* b, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t nmain, /* Real */ ae_matrix* cleic, ae_int_t dnec, ae_int_t dnic, sparsematrix* scleic, ae_int_t snec, ae_int_t snic, ae_bool renormlc, qpdenseaulbuffers* state, /* Real */ ae_vector* xs, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t j0; ae_int_t j1; double v; double vv; ae_int_t ktotal; ae_assert(akind==0||akind==1, "QPDENSEAULOptimize: unexpected AKind", _state); ktotal = dnec+dnic+snec+snic; rmatrixsetlengthatleast(&state->sclsfta, nmain, nmain, _state); rvectorsetlengthatleast(&state->sclsftb, nmain, _state); rvectorsetlengthatleast(&state->sclsftxc, nmain, _state); rvectorsetlengthatleast(&state->sclsftbndl, nmain, _state); rvectorsetlengthatleast(&state->sclsftbndu, nmain, _state); bvectorsetlengthatleast(&state->sclsfthasbndl, nmain, _state); bvectorsetlengthatleast(&state->sclsfthasbndu, nmain, _state); rmatrixsetlengthatleast(&state->sclsftcleic, ktotal, nmain+1, _state); rvectorsetlengthatleast(&state->cscales, ktotal, _state); if( akind==0 ) { /* * Extract dense A and scale */ cqmgeta(a, &state->tmp2, _state); for(i=0; i<=nmain-1; i++) { for(j=0; j<=nmain-1; j++) { state->sclsfta.ptr.pp_double[i][j] = (double)(0); } } for(i=0; i<=nmain-1; i++) { for(j=i; j<=nmain-1; j++) { v = state->tmp2.ptr.pp_double[i][j]*s->ptr.p_double[i]*s->ptr.p_double[j]; state->sclsfta.ptr.pp_double[i][j] = v; state->sclsfta.ptr.pp_double[j][i] = v; } } } if( akind==1 ) { /* * Extract sparse A and scale */ ae_assert(sparsea->matrixtype==1, "QPDENSEAULOptimize: unexpected sparse matrix format", _state); ae_assert(sparsea->m==nmain, "QPDENSEAULOptimize: unexpected sparse matrix size", _state); 
ae_assert(sparsea->n==nmain, "QPDENSEAULOptimize: unexpected sparse matrix size", _state); for(i=0; i<=nmain-1; i++) { for(j=0; j<=nmain-1; j++) { state->sclsfta.ptr.pp_double[i][j] = (double)(0); } } if( sparseaupper ) { for(i=0; i<=nmain-1; i++) { if( sparsea->didx.ptr.p_int[i]!=sparsea->uidx.ptr.p_int[i] ) { state->sclsfta.ptr.pp_double[i][i] = sparsea->vals.ptr.p_double[sparsea->didx.ptr.p_int[i]]*s->ptr.p_double[i]*s->ptr.p_double[i]; } j0 = sparsea->uidx.ptr.p_int[i]; j1 = sparsea->ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { k = sparsea->idx.ptr.p_int[j]; v = sparsea->vals.ptr.p_double[j]*s->ptr.p_double[i]*s->ptr.p_double[k]; state->sclsfta.ptr.pp_double[i][k] = v; state->sclsfta.ptr.pp_double[k][i] = v; } } } else { for(i=0; i<=nmain-1; i++) { if( sparsea->didx.ptr.p_int[i]!=sparsea->uidx.ptr.p_int[i] ) { state->sclsfta.ptr.pp_double[i][i] = sparsea->vals.ptr.p_double[sparsea->didx.ptr.p_int[i]]*s->ptr.p_double[i]*s->ptr.p_double[i]; } j0 = sparsea->ridx.ptr.p_int[i]; j1 = sparsea->didx.ptr.p_int[i]-1; for(j=j0; j<=j1; j++) { k = sparsea->idx.ptr.p_int[j]; v = sparsea->vals.ptr.p_double[j]*s->ptr.p_double[i]*s->ptr.p_double[k]; state->sclsfta.ptr.pp_double[i][k] = v; state->sclsfta.ptr.pp_double[k][i] = v; } } } } for(i=0; i<=nmain-1; i++) { state->sclsftb.ptr.p_double[i] = b->ptr.p_double[i]*s->ptr.p_double[i]; state->sclsftxc.ptr.p_double[i] = (xs->ptr.p_double[i]-xorigin->ptr.p_double[i])/s->ptr.p_double[i]; state->sclsfthasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->sclsfthasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); state->sclsftbndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->sclsftbndu.ptr.p_double[i] = bndu->ptr.p_double[i]; } scaleshiftbcinplace(s, xorigin, &state->sclsftbndl, &state->sclsftbndu, nmain, _state); for(i=0; i<=ktotal-1; i++) { for(j=0; j<=nmain; j++) { state->sclsftcleic.ptr.pp_double[i][j] = (double)(0); } } for(i=0; i<=dnec-1; i++) { for(j=0; j<=nmain-1; j++) { v = cleic->ptr.pp_double[i][j]*s->ptr.p_double[j]; state->sclsftcleic.ptr.pp_double[i][j] = v; } state->sclsftcleic.ptr.pp_double[i][nmain] = cleic->ptr.pp_double[i][nmain]; } for(i=0; i<=dnic-1; i++) { for(j=0; j<=nmain-1; j++) { v = cleic->ptr.pp_double[dnec+i][j]*s->ptr.p_double[j]; state->sclsftcleic.ptr.pp_double[dnec+snec+i][j] = v; } state->sclsftcleic.ptr.pp_double[dnec+snec+i][nmain] = cleic->ptr.pp_double[dnec+i][nmain]; } for(i=0; i<=snec-1; i++) { /* * Because constraints are sparse, everything is a bit tricky - * it is possible that N-th element of the row is zero and not * stored; it is also possible that entire row is empty. */ j0 = scleic->ridx.ptr.p_int[i]; j1 = scleic->ridx.ptr.p_int[i+1]-1; if( j1>=j0&&scleic->idx.ptr.p_int[j1]==nmain ) { state->sclsftcleic.ptr.pp_double[dnec+i][nmain] = scleic->vals.ptr.p_double[j1]; j1 = j1-1; } for(j=j0; j<=j1; j++) { k = scleic->idx.ptr.p_int[j]; v = scleic->vals.ptr.p_double[j]*s->ptr.p_double[k]; state->sclsftcleic.ptr.pp_double[dnec+i][k] = v; } } for(i=0; i<=snic-1; i++) { /* * Because constraints are sparse, everything is a bit tricky - * it is possible that N-th element of the row is zero and not * stored; it is also possible that entire row is empty. 
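NOTE (illustrative): the traversal used here is the standard CRS row scan. For a row R of a sparsematrix stored in CRS format, its stored elements occupy positions RIdx[R]..RIdx[R+1]-1, e.g. (process() is a placeholder, not an ALGLIB function):

     j0 = scleic->ridx.ptr.p_int[R];
     j1 = scleic->ridx.ptr.p_int[R+1]-1;
     for(j=j0; j<=j1; j++)
         process(scleic->idx.ptr.p_int[j],       // column index
                 scleic->vals.ptr.p_double[j]);  // stored value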
*/ j0 = scleic->ridx.ptr.p_int[snec+i]; j1 = scleic->ridx.ptr.p_int[snec+i+1]-1; if( j1>=j0&&scleic->idx.ptr.p_int[j1]==nmain ) { state->sclsftcleic.ptr.pp_double[dnec+snec+dnic+i][nmain] = scleic->vals.ptr.p_double[j1]; j1 = j1-1; } for(j=j0; j<=j1; j++) { k = scleic->idx.ptr.p_int[j]; v = scleic->vals.ptr.p_double[j]*s->ptr.p_double[k]; state->sclsftcleic.ptr.pp_double[dnec+snec+dnic+i][k] = v; } } if( renormlc&&ktotal>0 ) { /* * Normalize linear constraints in such way that they have unit norm * (after variable scaling) */ for(i=0; i<=ktotal-1; i++) { vv = 0.0; for(j=0; j<=nmain-1; j++) { v = state->sclsftcleic.ptr.pp_double[i][j]; vv = vv+v*v; } vv = ae_sqrt(vv, _state); state->cscales.ptr.p_double[i] = vv; if( ae_fp_greater(vv,(double)(0)) ) { vv = 1/vv; for(j=0; j<=nmain; j++) { state->sclsftcleic.ptr.pp_double[i][j] = state->sclsftcleic.ptr.pp_double[i][j]*vv; } } } } else { /* * Load unit scales */ for(i=0; i<=ktotal-1; i++) { state->cscales.ptr.p_double[i] = 1.0; } } for(i=0; i<=ktotal-1; i++) { /* * Apply XOrigin */ v = 0.0; for(j=0; j<=nmain-1; j++) { v = v+state->sclsftcleic.ptr.pp_double[i][j]*(xorigin->ptr.p_double[j]/s->ptr.p_double[j]); } state->sclsftcleic.ptr.pp_double[i][nmain] = state->sclsftcleic.ptr.pp_double[i][nmain]-v; } } /************************************************************************* Normalize model in such way that norm(A)~1 (very roughly) We have two lower bounds for sigma_max(A): * first estimate is provided by Frobenius norm, it is equal to ANorm/NMain * second estimate is provided by max(CAC) We select largest one of these estimates, because using just one of them is prone to different failure modes. Then, we divide A and B by this estimate. INPUT PARAMETERS: A - array[N,N], quadratic term, full triangle is given B - array[N], linear term N - problem size CLEIC- array[NEC+NIC,N+1], linear equality/inequality constraints NEC - number of equality constraints NIC - number of inequality constraints UseCLEIC- additional normalization of A in such way that CLEIC*A*CLEIC'~1: * if False, CLEIC is ignored * if True, CLEIC rows MUST have unit norm (we check it) Tmp2- additional buffer, possibly preallocated OUTPUT PARAMETERS: A, B - appropriately rescaled by 1/SCL RESULT: multiplier SCL -- ALGLIB -- Copyright 01.10.2017 by Bochkanov Sergey *************************************************************************/ static double qpdenseaulsolver_normalizequadraticterm(/* Real */ ae_matrix* a, /* Real */ ae_vector* b, ae_int_t n, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, ae_bool usecleic, /* Real */ ae_matrix* tmp2, ae_state *_state) { ae_int_t i; ae_int_t j; double anorm; double maxcac; double v; double vv; ae_int_t ktotal; ae_int_t nmain; double result; nmain = n; ktotal = nec+nic; anorm = (double)(0); for(i=0; i<=nmain-1; i++) { for(j=0; j<=nmain-1; j++) { anorm = anorm+ae_sqr(a->ptr.pp_double[i][j], _state); } } anorm = ae_sqrt(anorm, _state); if( usecleic&&ktotal>0 ) { /* * Calculate max(|diag(C*A*C')|), where C is constraint matrix */ rmatrixsetlengthatleast(tmp2, ktotal, nmain, _state); rmatrixgemm(ktotal, nmain, nmain, 1.0, cleic, 0, 0, 0, a, 0, 0, 0, 0.0, tmp2, 0, 0, _state); maxcac = 0.0; for(i=0; i<=ktotal-1; i++) { v = (double)(0); vv = (double)(0); for(j=0; j<=nmain-1; j++) { v = v+tmp2->ptr.pp_double[i][j]*cleic->ptr.pp_double[i][j]; vv = vv+ae_sqr(cleic->ptr.pp_double[i][j], _state); } ae_assert(ae_fp_less(ae_fabs(vv-1, _state),1.0E-9)||ae_fp_eq(vv,(double)(0)), "DENSE-AUL: integrity check failed", _state); maxcac = 
ae_maxreal(maxcac, ae_fabs(v, _state), _state); } } else { maxcac = (double)(0); } result = coalesce(ae_maxreal(maxcac, anorm/nmain, _state), (double)(1), _state); v = 1/result; for(i=0; i<=nmain-1; i++) { for(j=0; j<=nmain-1; j++) { a->ptr.pp_double[i][j] = a->ptr.pp_double[i][j]*v; } } for(i=0; i<=nmain-1; i++) { b->ptr.p_double[i] = b->ptr.p_double[i]*v; } return result; } /************************************************************************* This function selects initial working set of general inequality constraints for QP problem: * for non-convex QP problems - NICWork=NIC is returned * otherwise - NICWork=0 is returned (we have to determine working set iteratively) INPUT PARAMETERS: A - array[NMain], quadratic term, full matrix is stored NMain - number of variables in the "original" QP problem CLEIC - array[NEC+NIC,NMain+1], constraint matrix NEC - number of equality constraints NIC - number of inequality constraints OUTPUT PARAMETERS: NICWork - recommended size of working set; in current version either all (NICWork=NIC) or none (NICWork=0) constraints are included. AllowWSEviction-whether problem properties allow eviction of constraints from working set or not. Non-convex problems do not allow eviction, convex ones do. -- ALGLIB -- Copyright 02.10.2017 by Bochkanov Sergey *************************************************************************/ static void qpdenseaulsolver_selectinitialworkingset(/* Real */ ae_matrix* a, ae_int_t nmain, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, /* Real */ ae_vector* tmp0, /* Real */ ae_matrix* tmp2, ae_int_t* nicwork, ae_bool* allowwseviction, ae_state *_state) { ae_int_t i; ae_int_t j; *nicwork = 0; *allowwseviction = ae_false; rmatrixsetlengthatleast(tmp2, nmain, nmain, _state); rvectorsetlengthatleast(tmp0, nmain, _state); for(i=0; i<=nmain-1; i++) { for(j=i; j<=nmain-1; j++) { tmp2->ptr.pp_double[i][j] = a->ptr.pp_double[i][j]; } } if( !spdmatrixcholeskyrec(tmp2, 0, nmain, ae_true, tmp0, _state) ) { /* * Matrix is indefinite. * * We have to select full working set, otherwise algorithm may fail * because problem with reduced working set can be unbounded from below. */ *nicwork = nic; *allowwseviction = ae_false; } else { /* * Positive definite matrix. * * We can select zero initial working set and expand it later. 
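NOTE (illustrative): this is the usual "positive definite if and only if Cholesky succeeds" test. For example, A = [[2,0],[0,3]] factors into diag(sqrt(2),sqrt(3)) and would give NICWork=0, while A = [[1,2],[2,1]] (eigenvalues 3 and -1) has no Cholesky factorization and would force NICWork=NIC with eviction disabled.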
*/ *nicwork = 0; *allowwseviction = ae_true; } } void _qpdenseaulsettings_init(void* _p, ae_state *_state, ae_bool make_automatic) { qpdenseaulsettings *p = (qpdenseaulsettings*)_p; ae_touch_ptr((void*)p); } void _qpdenseaulsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qpdenseaulsettings *dst = (qpdenseaulsettings*)_dst; qpdenseaulsettings *src = (qpdenseaulsettings*)_src; dst->epsx = src->epsx; dst->outerits = src->outerits; dst->rho = src->rho; } void _qpdenseaulsettings_clear(void* _p) { qpdenseaulsettings *p = (qpdenseaulsettings*)_p; ae_touch_ptr((void*)p); } void _qpdenseaulsettings_destroy(void* _p) { qpdenseaulsettings *p = (qpdenseaulsettings*)_p; ae_touch_ptr((void*)p); } void _qpdenseaulbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) { qpdenseaulbuffers *p = (qpdenseaulbuffers*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->nulc, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->sclsfta, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclsftb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclsfthasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->sclsfthasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->sclsftbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclsftbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclsftxc, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->sclsftcleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->cscales, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->exa, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exxc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exscale, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->exxorigin, 0, DT_REAL, _state, make_automatic); _qqpsettings_init(&p->qqpsettingsuser, _state, make_automatic); _qqpbuffers_init(&p->qqpbuf, _state, make_automatic); ae_vector_init(&p->nulcest, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmp2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->modelg, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->deltax, 0, DT_REAL, _state, make_automatic); _convexquadraticmodel_init(&p->dummycqm, _state, make_automatic); _sparsematrix_init(&p->dummysparse, _state, make_automatic); ae_matrix_init(&p->qrkkt, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->qrrightpart, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->qrtau, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->qrsv0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->qrsvx1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nicerr, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nicnact, 0, DT_INT, _state, make_automatic); } void _qpdenseaulbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qpdenseaulbuffers *dst = (qpdenseaulbuffers*)_dst; qpdenseaulbuffers *src = (qpdenseaulbuffers*)_src; ae_vector_init_copy(&dst->nulc, &src->nulc, _state, make_automatic); ae_matrix_init_copy(&dst->sclsfta, &src->sclsfta, _state, 
make_automatic); ae_vector_init_copy(&dst->sclsftb, &src->sclsftb, _state, make_automatic); ae_vector_init_copy(&dst->sclsfthasbndl, &src->sclsfthasbndl, _state, make_automatic); ae_vector_init_copy(&dst->sclsfthasbndu, &src->sclsfthasbndu, _state, make_automatic); ae_vector_init_copy(&dst->sclsftbndl, &src->sclsftbndl, _state, make_automatic); ae_vector_init_copy(&dst->sclsftbndu, &src->sclsftbndu, _state, make_automatic); ae_vector_init_copy(&dst->sclsftxc, &src->sclsftxc, _state, make_automatic); ae_matrix_init_copy(&dst->sclsftcleic, &src->sclsftcleic, _state, make_automatic); ae_vector_init_copy(&dst->cidx, &src->cidx, _state, make_automatic); ae_vector_init_copy(&dst->cscales, &src->cscales, _state, make_automatic); ae_matrix_init_copy(&dst->exa, &src->exa, _state, make_automatic); ae_vector_init_copy(&dst->exb, &src->exb, _state, make_automatic); ae_vector_init_copy(&dst->exxc, &src->exxc, _state, make_automatic); ae_vector_init_copy(&dst->exbndl, &src->exbndl, _state, make_automatic); ae_vector_init_copy(&dst->exbndu, &src->exbndu, _state, make_automatic); ae_vector_init_copy(&dst->exscale, &src->exscale, _state, make_automatic); ae_vector_init_copy(&dst->exxorigin, &src->exxorigin, _state, make_automatic); _qqpsettings_init_copy(&dst->qqpsettingsuser, &src->qqpsettingsuser, _state, make_automatic); _qqpbuffers_init_copy(&dst->qqpbuf, &src->qqpbuf, _state, make_automatic); ae_vector_init_copy(&dst->nulcest, &src->nulcest, _state, make_automatic); ae_vector_init_copy(&dst->tmpg, &src->tmpg, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_matrix_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_vector_init_copy(&dst->modelg, &src->modelg, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->deltax, &src->deltax, _state, make_automatic); _convexquadraticmodel_init_copy(&dst->dummycqm, &src->dummycqm, _state, make_automatic); _sparsematrix_init_copy(&dst->dummysparse, &src->dummysparse, _state, make_automatic); ae_matrix_init_copy(&dst->qrkkt, &src->qrkkt, _state, make_automatic); ae_vector_init_copy(&dst->qrrightpart, &src->qrrightpart, _state, make_automatic); ae_vector_init_copy(&dst->qrtau, &src->qrtau, _state, make_automatic); ae_vector_init_copy(&dst->qrsv0, &src->qrsv0, _state, make_automatic); ae_vector_init_copy(&dst->qrsvx1, &src->qrsvx1, _state, make_automatic); ae_vector_init_copy(&dst->nicerr, &src->nicerr, _state, make_automatic); ae_vector_init_copy(&dst->nicnact, &src->nicnact, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repncholesky = src->repncholesky; dst->repnwrkchanges = src->repnwrkchanges; dst->repnwrk0 = src->repnwrk0; dst->repnwrk1 = src->repnwrk1; dst->repnwrkf = src->repnwrkf; dst->repnmv = src->repnmv; } void _qpdenseaulbuffers_clear(void* _p) { qpdenseaulbuffers *p = (qpdenseaulbuffers*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->nulc); ae_matrix_clear(&p->sclsfta); ae_vector_clear(&p->sclsftb); ae_vector_clear(&p->sclsfthasbndl); ae_vector_clear(&p->sclsfthasbndu); ae_vector_clear(&p->sclsftbndl); ae_vector_clear(&p->sclsftbndu); ae_vector_clear(&p->sclsftxc); ae_matrix_clear(&p->sclsftcleic); ae_vector_clear(&p->cidx); ae_vector_clear(&p->cscales); ae_matrix_clear(&p->exa); ae_vector_clear(&p->exb); ae_vector_clear(&p->exxc); ae_vector_clear(&p->exbndl); ae_vector_clear(&p->exbndu); ae_vector_clear(&p->exscale); 
ae_vector_clear(&p->exxorigin); _qqpsettings_clear(&p->qqpsettingsuser); _qqpbuffers_clear(&p->qqpbuf); ae_vector_clear(&p->nulcest); ae_vector_clear(&p->tmpg); ae_vector_clear(&p->tmp0); ae_matrix_clear(&p->tmp2); ae_vector_clear(&p->modelg); ae_vector_clear(&p->d); ae_vector_clear(&p->deltax); _convexquadraticmodel_clear(&p->dummycqm); _sparsematrix_clear(&p->dummysparse); ae_matrix_clear(&p->qrkkt); ae_vector_clear(&p->qrrightpart); ae_vector_clear(&p->qrtau); ae_vector_clear(&p->qrsv0); ae_vector_clear(&p->qrsvx1); ae_vector_clear(&p->nicerr); ae_vector_clear(&p->nicnact); } void _qpdenseaulbuffers_destroy(void* _p) { qpdenseaulbuffers *p = (qpdenseaulbuffers*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->nulc); ae_matrix_destroy(&p->sclsfta); ae_vector_destroy(&p->sclsftb); ae_vector_destroy(&p->sclsfthasbndl); ae_vector_destroy(&p->sclsfthasbndu); ae_vector_destroy(&p->sclsftbndl); ae_vector_destroy(&p->sclsftbndu); ae_vector_destroy(&p->sclsftxc); ae_matrix_destroy(&p->sclsftcleic); ae_vector_destroy(&p->cidx); ae_vector_destroy(&p->cscales); ae_matrix_destroy(&p->exa); ae_vector_destroy(&p->exb); ae_vector_destroy(&p->exxc); ae_vector_destroy(&p->exbndl); ae_vector_destroy(&p->exbndu); ae_vector_destroy(&p->exscale); ae_vector_destroy(&p->exxorigin); _qqpsettings_destroy(&p->qqpsettingsuser); _qqpbuffers_destroy(&p->qqpbuf); ae_vector_destroy(&p->nulcest); ae_vector_destroy(&p->tmpg); ae_vector_destroy(&p->tmp0); ae_matrix_destroy(&p->tmp2); ae_vector_destroy(&p->modelg); ae_vector_destroy(&p->d); ae_vector_destroy(&p->deltax); _convexquadraticmodel_destroy(&p->dummycqm); _sparsematrix_destroy(&p->dummysparse); ae_matrix_destroy(&p->qrkkt); ae_vector_destroy(&p->qrrightpart); ae_vector_destroy(&p->qrtau); ae_vector_destroy(&p->qrsv0); ae_vector_destroy(&p->qrsvx1); ae_vector_destroy(&p->nicerr); ae_vector_destroy(&p->nicnact); } #endif #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD) /************************************************************************* BOUND CONSTRAINED OPTIMIZATION WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization is far more complex than the unconstrained one. Here we give very brief outline of the BLEIC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBLEICCreate() call 2. User adds boundary and/or linear constraints by calling MinBLEICSetBC() and MinBLEICSetLC() functions. 3. User sets stopping conditions with MinBLEICSetCond(). 4. User calls MinBLEICOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBLEICResults() to get solution 6. Optionally user may call MinBLEICRestartFrom() to solve another problem with same N but another starting point. MinBLEICRestartFrom() allows to reuse already initialized structure.
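The calling sequence above can be sketched against the C++ wrappers declared in optimization.h as follows (the quadratic target, bounds and tolerance are invented for illustration; production code should also inspect MinBLEICReport.TerminationType):

     #include "optimization.h"
     using namespace alglib;

     // f(x0,x1) = (x0-1)^2 + (x1+2)^2, with analytic gradient
     static void fg(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
     {
         func = (x[0]-1)*(x[0]-1)+(x[1]+2)*(x[1]+2);
         grad[0] = 2*(x[0]-1);
         grad[1] = 2*(x[1]+2);
     }
     static void bleic_demo()
     {
         real_1d_array x    = "[0.5,-0.5]";        // starting point
         real_1d_array bndl = "[0.0,-10.0]";       // box constraints
         real_1d_array bndu = "[10.0,0.0]";
         minbleicstate  state;
         minbleicreport rep;
         minbleiccreate(x, state);                 // step 1
         minbleicsetbc(state, bndl, bndu);         // step 2
         minbleicsetcond(state, 0, 0, 1.0e-6, 0);  // step 3
         minbleicoptimize(state, fg);              // step 4
         minbleicresults(state, x, rep);           // step 5
     }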
NOTE: if you have box-only constraints (no general linear constraints), then MinBC optimizer can be better option. It uses special, faster constraint activation method, which performs better on problems with multiple constraints active at the solution. On small-scale problems performance of MinBC is similar to that of MinBLEIC, but on large-scale ones (hundreds and thousands of active constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleiccreate(ae_int_t n, /* Real */ ae_vector* x, minbleicstate* state, ae_state *_state) { ae_frame _frame_block; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); _minbleicstate_clear(state); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); ae_assert(n>=1, "MinBLEICCreate: N<1", _state); ae_assert(x->cnt>=n, "MinBLEICCreate: Length(X)<N", _state); ae_assert(isfinitevector(x, n, _state), "MinBLEICCreate: X contains infinite or NaN values!", _state); minbleic_minbleicinitinternal(n, x, 0.0, state, _state); ae_frame_leave(_state); } /************************************************************************* The subroutine is finite difference variant of MinBLEICCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBLEICCreate() in order to get more information about creation of BLEIC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBLEICSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. CG needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible.
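NOTE (illustrative): the 4-point formula from note 1 combines values at X+/-H/2 and X+/-H, H=DiffStep*S[I], exactly as the numerical-differentiation branch of MinBLEICIteration() does: dF/dX[I] ~ (8*(F(X+H/2)-F(X-H/2))-(F(X+H)-F(X-H)))/(6*H). A standalone sketch (generic function pointer, not an ALGLIB type):

     // fourth-order central difference for the I-th partial derivative
     double central4(double (*f)(const double* x, int n), double* x, int n, int i, double h)
     {
         double v = x[i], fm2, fm1, fp1, fp2;
         x[i] = v-h;     fm2 = f(x, n);
         x[i] = v-0.5*h; fm1 = f(x, n);
         x[i] = v+0.5*h; fp1 = f(x, n);
         x[i] = v+h;     fp2 = f(x, n);
         x[i] = v;                                  // restore the point
         return (8*(fp1-fm1)-(fp2-fm2))/(6*h);      // 4 evaluations per component, 4*N per gradient
     }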
-- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minbleiccreatef(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minbleicstate* state, ae_state *_state) { ae_frame _frame_block; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); _minbleicstate_clear(state); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); ae_assert(n>=1, "MinBLEICCreateF: N<1", _state); ae_assert(x->cnt>=n, "MinBLEICCreateF: Length(X)<N", _state); ae_assert(isfinitevector(x, n, _state), "MinBLEICCreateF: X contains infinite or NaN values!", _state); ae_assert(ae_isfinite(diffstep, _state), "MinBLEICCreateF: DiffStep is infinite or NaN!", _state); ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinBLEICCreateF: DiffStep is non-positive!", _state); minbleic_minbleicinitinternal(n, x, diffstep, state, _state); ae_frame_leave(_state); } /************************************************************************* This function sets boundary constraints for BLEIC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBLEICRestartFrom(). INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is very important to set boundary constraints BEFORE you start optimization. NOTE 2: if BndL[i]=BndU[i], it is considered that I-th variable is fixed at X[i]=BndL[i]=BndU[i]. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbc(minbleicstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->nmain; ae_assert(bndl->cnt>=n, "MinBLEICSetBC: Length(BndL)<N", _state); ae_assert(bndu->cnt>=n, "MinBLEICSetBC: Length(BndU)<N", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinBLEICSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinBLEICSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } sassetbc(&state->sas, bndl, bndu, _state); } /************************************************************************* This function sets linear constraints for BLEIC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBLEICRestartFrom(). INPUT PARAMETERS: State - structure previously allocated with MinBLEICCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about Epsilon in magnitude) due to rounding errors * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points).
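For example (an illustration with invented numbers), the constraint pair x0+x1<=2 and x0-x1=0 is passed through the C++ wrapper as:

     real_2d_array c  = "[[1,1,2],[1,-1,0]]";    // each row: N coefficients followed by the right part
     integer_1d_array ct = "[-1,0]";             // -1 means "<=", 0 means "=", +1 means ">="
     minbleicsetlc(state, c, ct);                // K is deduced from rows(C)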
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetlc(minbleicstate* state, /* Real */ ae_matrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; double v; n = state->nmain; /* * First, check for errors in the inputs */ ae_assert(k>=0, "MinBLEICSetLC: K<0", _state); ae_assert(c->cols>=n+1||k==0, "MinBLEICSetLC: Cols(C)<N+1", _state); ae_assert(c->rows>=k, "MinBLEICSetLC: Rows(C)<K", _state); ae_assert(ct->cnt>=k, "MinBLEICSetLC: Length(CT)<K", _state); if( k==0 ) { state->nec = 0; state->nic = 0; sassetlc(&state->sas, c, ct, 0, _state); return; } /* * Equality constraints are stored first, in the upper * NEC rows of State.CLEIC matrix. Inequality constraints * are stored in the next NIC rows. * * NOTE: we convert inequality constraints to the form * A*x<=b before copying them. */ rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); state->nec = 0; state->nic = 0; for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]==0 ) { ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); state->nec = state->nec+1; } } for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]!=0 ) { if( ct->ptr.p_int[i]>0 ) { ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } else { ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } state->nic = state->nic+1; } } /* * Normalize rows of State.CLEIC: each row must have unit norm. * Norm is calculated using first N elements (i.e. right part is * not counted when we calculate norm). */ for(i=0; i<=k-1; i++) { v = (double)(0); for(j=0; j<=n-1; j++) { v = v+ae_sqr(state->cleic.ptr.pp_double[i][j], _state); } if( ae_fp_eq(v,(double)(0)) ) { continue; } v = 1/ae_sqrt(v, _state); ae_v_muld(&state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n), v); } sassetlc(&state->sas, c, ct, k, _state); } /************************************************************************* This function sets stopping conditions for the optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidian norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinBLEICSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinBLEICSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection. NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform slightly more than MaxIts iterations. I.e., MaxIts sets non-strict limit on iterations count.
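For example (an illustrative call with invented tolerance), minbleicsetcond(&state, 0.0, 0.0, 1.0E-6, 0, _state) disables the gradient- and function-based tests, leaves the iteration count unlimited, and stops the solver once the scaled step vector v, v[i]=dx[i]/s[i], has Euclidian norm not greater than 1.0E-6.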
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetcond(minbleicstate* state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinBLEICSetCond: EpsG is not finite number", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinBLEICSetCond: negative EpsG", _state); ae_assert(ae_isfinite(epsf, _state), "MinBLEICSetCond: EpsF is not finite number", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinBLEICSetCond: negative EpsF", _state); ae_assert(ae_isfinite(epsx, _state), "MinBLEICSetCond: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinBLEICSetCond: negative EpsX", _state); ae_assert(maxits>=0, "MinBLEICSetCond: negative MaxIts!", _state); if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 ) { epsx = 1.0E-6; } state->epsg = epsg; state->epsf = epsf; state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function sets scaling coefficients for BLEIC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the BLEIC too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinBLEICSetPrec...() functions. There is a special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minbleicsetscale(minbleicstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->nmain, "MinBLEICSetScale: Length(S)<N", _state); for(i=0; i<=state->nmain-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinBLEICSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinBLEICSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } sassetscale(&state->sas, s, _state); } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecdefault(minbleicstate* state, ae_state *_state) { state->prectype = 0; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used.
INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE 1: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecdiag(minbleicstate* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(d->cnt>=state->nmain, "MinBLEICSetPrecDiag: D is too short", _state); for(i=0; i<=state->nmain-1; i++) { ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinBLEICSetPrecDiag: D contains infinite or NAN elements", _state); ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "MinBLEICSetPrecDiag: D contains non-positive elements", _state); } rvectorsetlengthatleast(&state->diagh, state->nmain, _state); state->prectype = 2; for(i=0; i<=state->nmain-1; i++) { state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; } } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPORTANT: you should set scale of your variables with MinBLEICSetScale() call (before or after MinBLEICSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetprecscale(minbleicstate* state, ae_state *_state) { state->prectype = 3; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinBLEICOptimize(). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetxrep(minbleicstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* This function turns on/off line search reports. These reports are described in more details in developer-only comments on MinBLEICState object. INPUT PARAMETERS: State - structure which stores algorithm state NeedDRep- whether line search reports are needed or not This function is intended for private use only. Turning it on artificially may cause program failure.
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetdrep(minbleicstate* state, ae_bool needdrep, ae_state *_state) { state->drep = needdrep; } /************************************************************************* This function sets maximum step length IMPORTANT: this feature is hard to combine with preconditioning. You can't set upper limit on step length, when you solve optimization problem with linear (non-boundary) constraints AND preconditioner turned on. When non-boundary constraints are present, you have to either a) use preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! In this case algorithm will terminate with appropriate error code. INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetstpmax(minbleicstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinBLEICSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinBLEICSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied gradient, and one which uses function value only and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF() for numerical differentiation) you should choose appropriate variant of MinBLEICOptimize() - one which accepts function AND gradient or one which accepts function ONLY. Be careful to choose variant of MinBLEICOptimize() which corresponds to your optimization scheme! Table below lists different combinations of callback (function/gradient) passed to MinBLEICOptimize() and specific function used to create optimizer. | USER PASSED TO MinBLEICOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ MinBLEICCreateF() | work FAIL MinBLEICCreate() | FAIL work Here "FAIL" denotes inappropriate combinations of optimizer creation function and MinBLEICOptimize() version. Attempts to use such combination (for example, to create optimizer with MinBLEICCreateF() and to pass gradient information to MinBLEICOptimize()) will lead to exception being thrown. Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed.
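In the C++ interface the table above corresponds to the two minbleicoptimize() overloads; a minimal sketch of the two valid pairings (targets invented, "using namespace alglib" and #include "optimization.h" assumed):

     static void f_only(const real_1d_array &x, double &func, void *ptr)
     { func = x[0]*x[0]; }
     static void f_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
     { func = x[0]*x[0]; grad[0] = 2*x[0]; }
     static void pairing_demo()
     {
         real_1d_array x = "[3.0]";
         minbleicstate s1, s2;
         minbleiccreatef(x, 1.0e-6, s1);   // numerical differentiation ...
         minbleicoptimize(s1, f_only);     // ... takes the function-only callback
         minbleiccreate(x, s2);            // analytic gradient ...
         minbleicoptimize(s2, f_grad);     // ... takes the function+gradient callback
     }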
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ ae_bool minbleiciteration(minbleicstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; ae_int_t j; double v; double vv; double v0; ae_bool b; ae_int_t mcinfo; ae_int_t actstatus; ae_int_t itidx; double penalty; double ginit; double gdecay; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; m = state->rstate.ia.ptr.p_int[1]; i = state->rstate.ia.ptr.p_int[2]; j = state->rstate.ia.ptr.p_int[3]; mcinfo = state->rstate.ia.ptr.p_int[4]; actstatus = state->rstate.ia.ptr.p_int[5]; itidx = state->rstate.ia.ptr.p_int[6]; b = state->rstate.ba.ptr.p_bool[0]; v = state->rstate.ra.ptr.p_double[0]; vv = state->rstate.ra.ptr.p_double[1]; v0 = state->rstate.ra.ptr.p_double[2]; penalty = state->rstate.ra.ptr.p_double[3]; ginit = state->rstate.ra.ptr.p_double[4]; gdecay = state->rstate.ra.ptr.p_double[5]; } else { n = 359; m = -58; i = -919; j = -909; mcinfo = 81; actstatus = 255; itidx = 74; b = ae_false; v = 809; vv = 205; v0 = -838; penalty = 939; ginit = -526; gdecay = 763; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } if( state->rstate.stage==15 ) { goto lbl_15; } if( state->rstate.stage==16 ) { goto lbl_16; } if( state->rstate.stage==17 ) { goto lbl_17; } if( state->rstate.stage==18 ) { goto lbl_18; } if( state->rstate.stage==19 ) { goto lbl_19; } if( state->rstate.stage==20 ) { goto lbl_20; } if( state->rstate.stage==21 ) { goto lbl_21; } /* * Routine body */ /* * Algorithm parameters: * * M number of L-BFGS corrections. * This coefficient remains fixed during iterations. * * GDecay desired decrease of constrained gradient during L-BFGS iterations. * This coefficient is decreased after each L-BFGS round until * it reaches minimum decay. 
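NOTE (illustrative): the Stage/goto machinery above implements reverse communication; a caller drives such a core with a loop of roughly the following form (this is, in essence, what the minbleicoptimize() wrapper does on behalf of the user):

     while( minbleiciteration(state, _state) )
     {
         if( state->needfg )
         {
             // evaluate target at state->x, store value in state->f and gradient in state->g
             continue;
         }
         if( state->needf )
         {
             // evaluate value only (numerical differentiation mode), store it in state->f
             continue;
         }
         if( state->xupdated )
         {
             // new iterate reported in state->x (only when SetXRep was enabled)
             continue;
         }
     }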
*/ m = ae_minint(5, state->nmain, _state); gdecay = minbleic_initialdecay; /* * Init */ n = state->nmain; state->steepestdescentstep = ae_false; state->userterminationneeded = ae_false; state->repterminationtype = 0; state->repinneriterationscount = 0; state->repouteriterationscount = 0; state->repnfev = 0; state->repvaridx = -1; state->repdebugeqerr = 0.0; state->repdebugfs = _state->v_nan; state->repdebugff = _state->v_nan; state->repdebugdx = _state->v_nan; if( ae_fp_neq(state->stpmax,(double)(0))&&state->prectype!=0 ) { state->repterminationtype = -10; result = ae_false; return result; } rmatrixsetlengthatleast(&state->bufyk, m+1, n, _state); rmatrixsetlengthatleast(&state->bufsk, m+1, n, _state); rvectorsetlengthatleast(&state->bufrho, m, _state); rvectorsetlengthatleast(&state->buftheta, m, _state); rvectorsetlengthatleast(&state->tmp0, n, _state); smoothnessmonitorinit(&state->smonitor, n, 1, state->smoothnessguardlevel>0, _state); for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; state->invs.ptr.p_double[i] = 1/state->s.ptr.p_double[i]; } /* * Check analytic derivative */ minbleic_clearrequestfields(state, _state); if( !(ae_fp_eq(state->diffstep,(double)(0))&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_22; } lbl_24: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xstart, &state->s, &state->bndl, &state->bndu, ae_true, state->teststep, _state) ) { goto lbl_25; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfg = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfg = ae_false; state->smonitor.fi.ptr.p_double[0] = state->f; for(i=0; i<=n-1; i++) { state->smonitor.j.ptr.pp_double[0][i] = state->g.ptr.p_double[i]; } goto lbl_24; lbl_25: lbl_22: /* * Fill TmpPrec with current preconditioner */ rvectorsetlengthatleast(&state->tmpprec, n, _state); for(i=0; i<=n-1; i++) { if( state->prectype==2 ) { state->tmpprec.ptr.p_double[i] = state->diagh.ptr.p_double[i]; continue; } if( state->prectype==3 ) { state->tmpprec.ptr.p_double[i] = 1/ae_sqr(state->s.ptr.p_double[i], _state); continue; } state->tmpprec.ptr.p_double[i] = (double)(1); } sassetprecdiag(&state->sas, &state->tmpprec, _state); /* * Start optimization */ if( !sasstartoptimization(&state->sas, &state->xstart, _state) ) { state->repterminationtype = -3; result = ae_false; return result; } /* * Main cycle of BLEIC-PG algorithm */ state->repterminationtype = 0; state->lastgoodstep = (double)(0); state->lastscaledgoodstep = (double)(0); state->maxscaledgrad = (double)(0); state->nonmonotoniccnt = ae_round(1.5*(n+state->nic), _state)+5; ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbleic_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_26; } state->needfg = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needfg = ae_false; goto lbl_27; lbl_26: state->needf = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->needf = ae_false; lbl_27: state->fc = state->f; trimprepare(state->f, &state->trimthreshold, _state); state->repnfev = state->repnfev+1; if( !state->xrep ) { goto lbl_28; } /* * Report current point */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fc; state->xupdated = ae_true; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->xupdated = ae_false; lbl_28: if( state->userterminationneeded ) { /* * User 
requested termination */ sasstopoptimization(&state->sas, _state); state->repterminationtype = 8; result = ae_false; return result; } lbl_30: if( ae_false ) { goto lbl_31; } /* * Preparations * * (a) calculate unconstrained gradient * (b) determine initial active set * (c) update MaxScaledGrad * (d) check F/G for NAN/INF, abnormally terminate algorithm if needed */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbleic_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_32; } /* * Analytic gradient */ state->needfg = ae_true; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->needfg = ae_false; goto lbl_33; lbl_32: /* * Numerical differentiation */ state->needf = ae_true; state->rstate.stage = 5; goto lbl_rcomm; lbl_5: state->fbase = state->f; i = 0; lbl_34: if( i>n-1 ) { goto lbl_36; } v = state->x.ptr.p_double[i]; b = ae_false; if( state->hasbndl.ptr.p_bool[i] ) { b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); } if( state->hasbndu.ptr.p_bool[i] ) { b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); } if( b ) { goto lbl_37; } state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->fp2 = state->f; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); goto lbl_38; lbl_37: state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) { state->xm1 = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) { state->xp1 = state->bndu.ptr.p_double[i]; } state->x.ptr.p_double[i] = state->xm1; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->fm1 = state->f; state->x.ptr.p_double[i] = state->xp1; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->fp1 = state->f; if( ae_fp_neq(state->xm1,state->xp1) ) { state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); } else { state->g.ptr.p_double[i] = (double)(0); } lbl_38: state->x.ptr.p_double[i] = v; i = i+1; goto lbl_34; lbl_36: state->f = state->fbase; state->needf = ae_false; lbl_33: state->fc = state->f; ae_v_move(&state->ugc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); sasreactivateconstraintsprec(&state->sas, &state->ugc, _state); sasconstraineddirection(&state->sas, &state->cgc, _state); ginit = 0.0; for(i=0; i<=n-1; i++) { ginit = ginit+ae_sqr(state->cgc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } ginit = ae_sqrt(ginit, _state); state->maxscaledgrad = ae_maxreal(state->maxscaledgrad, ginit, _state); if( !ae_isfinite(ginit, _state)||!ae_isfinite(state->fc, _state) ) { /* * Abnormal termination - infinities in function/gradient */ 
sasstopoptimization(&state->sas, _state); state->repterminationtype = -8; result = ae_false; return result; } if( state->userterminationneeded ) { /* * User requested termination */ sasstopoptimization(&state->sas, _state); state->repterminationtype = 8; result = ae_false; return result; } /* * LBFGS stage: * * during LBFGS iterations we activate new constraints, but never * deactivate already active ones. * * we perform at most N iterations of LBFGS before re-evaluating * active set and restarting LBFGS. * * first iteration of LBFGS is a special - it is performed with * minimum set of active constraints, algorithm termination can * be performed only at this state. We call this iteration * "steepest descent step". * * About termination: * * LBFGS iterations can be terminated because of two reasons: * * "termination" - non-zero termination code in RepTerminationType, * which means that optimization is done * * "restart" - zero RepTerminationType, which means that we * have to re-evaluate active set and resume LBFGS stage. * * one more option is "refresh" - to continue LBFGS iterations, * but with all BFGS updates (Sk/Yk pairs) being dropped; * it happens after changes in active set */ state->bufsize = 0; state->steepestdescentstep = ae_true; itidx = -1; lbl_39: if( itidx>=n-1 ) { goto lbl_40; } /* * Increment iterations counter * * NOTE: we have strong reasons to use such complex scheme * instead of just for() loop - this counter may be * decreased at some occasions to perform "restart" * of an iteration. */ itidx = itidx+1; /* * At the beginning of each iteration: * * SAS.XC stores current point * * FC stores current function value * * UGC stores current unconstrained gradient * * CGC stores current constrained gradient * * D stores constrained step direction (calculated at this block) * * * Check gradient-based stopping criteria * * This stopping condition is tested only for step which is the * first step of LBFGS (subsequent steps may accumulate active * constraints thus they should NOT be used for stopping - gradient * may be small when constrained, but these constraints may be * deactivated by the subsequent steps) */ if( state->steepestdescentstep&&ae_fp_less_eq(sasscaledconstrainednorm(&state->sas, &state->ugc, _state),state->epsg) ) { /* * Gradient is small enough. * Optimization is terminated */ state->repterminationtype = 4; goto lbl_40; } /* * 1. Calculate search direction D according to L-BFGS algorithm * using constrained preconditioner to perform inner multiplication. * 2. Evaluate scaled length of direction D; restart LBFGS if D is zero * (it may be possible that we found minimum, but it is also possible * that some constraints need deactivation) * 3. If D is non-zero, try to use previous scaled step length as initial estimate for new step. 
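NOTE (illustrative): step 1 is the classical L-BFGS two-loop recursion. Ignoring the constrained preconditioner, it turns the stored pairs (s_k,y_k) and the current gradient q into a quasi-Newton direction d=-H*q roughly as follows (plain-array sketch; dot(), axpy() and scale() are hypothetical helpers):

     for(k=m-1; k>=0; k--)                      // first loop, newest pair processed first
     {
         alpha[k] = rho[k]*dot(sk[k], q, n);
         axpy(q, yk[k], -alpha[k], n);          // q -= alpha[k]*y_k
     }
     scale(q, gamma, n);                        // apply initial Hessian approximation
     for(k=0; k<=m-1; k++)                      // second loop
     {
         beta = rho[k]*dot(yk[k], q, n);
         axpy(q, sk[k], alpha[k]-beta, n);      // q += (alpha[k]-beta)*s_k
     }
     // d = -q, with rho[k] = 1/(y_k'*s_k)

In the implementation below the middle scaling step is replaced by application of the constrained preconditioner (sasconstraineddirectionprec), and the per-pair quantities are kept in BufTheta/BufRho.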
*/ ae_v_move(&state->work.ptr.p_double[0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=state->bufsize-1; i>=0; i--) { v = ae_v_dotproduct(&state->bufsk.ptr.pp_double[i][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->buftheta.ptr.p_double[i] = v; vv = v*state->bufrho.ptr.p_double[i]; ae_v_subd(&state->work.ptr.p_double[0], 1, &state->bufyk.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), vv); } sasconstraineddirectionprec(&state->sas, &state->work, _state); for(i=0; i<=state->bufsize-1; i++) { v = ae_v_dotproduct(&state->bufyk.ptr.pp_double[i][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); vv = state->bufrho.ptr.p_double[i]*(-v+state->buftheta.ptr.p_double[i]); ae_v_addd(&state->work.ptr.p_double[0], 1, &state->bufsk.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), vv); } sasconstraineddirection(&state->sas, &state->work, _state); ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( ae_fp_eq(v,(double)(0)) ) { /* * Search direction is zero. * If we perform "steepest descent step", algorithm is terminated. * Otherwise we just restart LBFGS. */ if( state->steepestdescentstep ) { state->repterminationtype = 4; } goto lbl_40; } ae_assert(ae_fp_greater(v,(double)(0)), "MinBLEIC: internal error", _state); if( ae_fp_greater(state->lastscaledgoodstep,(double)(0))&&ae_fp_greater(v,(double)(0)) ) { state->stp = state->lastscaledgoodstep/v; } else { state->stp = 1.0/v; } /* * Calculate bound on step length. * Step direction is stored */ sasexploredirection(&state->sas, &state->d, &state->curstpmax, &state->cidx, &state->cval, _state); state->activationstep = state->curstpmax; if( state->cidx>=0&&ae_fp_eq(state->activationstep,(double)(0)) ) { /* * We are exactly at the boundary, immediate activation * of constraint is required. LBFGS stage is continued * with "refreshed" model. * * ! IMPORTANT: we do not clear SteepestDescent flag here, * ! it is very important for correct stopping * ! of algorithm. * * ! IMPORTANT: we decrease iteration counter in order to * preserve computational budget for iterations. */ sasimmediateactivation(&state->sas, state->cidx, state->cval, _state); state->bufsize = 0; itidx = itidx-1; goto lbl_39; } if( ae_fp_greater(state->stpmax,(double)(0)) ) { v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = ae_sqrt(v, _state); if( ae_fp_greater(v,(double)(0)) ) { state->curstpmax = ae_minreal(state->curstpmax, state->stpmax/v, _state); } } /* * Report beginning of line search (if requested by caller). * See description of the MinBLEICState for more information * about fields accessible to caller. * * Caller may do following: * * change State.Stp and load better initial estimate of * the step length. * Caller may not terminate algorithm. 
*/ if( !state->drep ) { goto lbl_41; } minbleic_clearrequestfields(state, _state); state->lsstart = ae_true; state->boundedstep = state->cidx>=0; ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->lsstart = ae_false; lbl_41: /* * Minimize F(x+alpha*d) */ ae_v_move(&state->xn.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgn.ptr.p_double[0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugn.ptr.p_double[0], 1, &state->ugc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fn = state->fc; state->mcstage = 0; smoothnessmonitorstartlinesearch1u(&state->smonitor, &state->s, &state->invs, &state->xn, state->fn, &state->ugn, _state); mcsrch(n, &state->xn, &state->fn, &state->ugn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); lbl_43: if( state->mcstage==0 ) { goto lbl_44; } /* * Perform correction (constraints are enforced) * Copy XN to X */ sascorrection(&state->sas, &state->xn, &penalty, _state); for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xn.ptr.p_double[i]; } /* * Gradient, either user-provided or numerical differentiation */ minbleic_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_45; } /* * Analytic gradient */ state->needfg = ae_true; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->needfg = ae_false; state->repnfev = state->repnfev+1; goto lbl_46; lbl_45: /* * Numerical differentiation */ state->needf = ae_true; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->fbase = state->f; i = 0; lbl_47: if( i>n-1 ) { goto lbl_49; } v = state->x.ptr.p_double[i]; b = ae_false; if( state->hasbndl.ptr.p_bool[i] ) { b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); } if( state->hasbndu.ptr.p_bool[i] ) { b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); } if( b ) { goto lbl_50; } state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 15; goto lbl_rcomm; lbl_15: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 16; goto lbl_rcomm; lbl_16: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 17; goto lbl_rcomm; lbl_17: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 18; goto lbl_rcomm; lbl_18: state->fp2 = state->f; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); state->repnfev = state->repnfev+4; goto lbl_51; lbl_50: state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) { state->xm1 = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) { state->xp1 = state->bndu.ptr.p_double[i]; } state->x.ptr.p_double[i] = state->xm1; state->rstate.stage = 19; goto lbl_rcomm; lbl_19: state->fm1 = state->f; state->x.ptr.p_double[i] = state->xp1; state->rstate.stage = 20; goto lbl_rcomm; lbl_20: state->fp1 = state->f; if( ae_fp_neq(state->xm1,state->xp1) ) { state->g.ptr.p_double[i] = 
(state->fp1-state->fm1)/(state->xp1-state->xm1); } else { state->g.ptr.p_double[i] = (double)(0); } state->repnfev = state->repnfev+2; lbl_51: state->x.ptr.p_double[i] = v; i = i+1; goto lbl_47; lbl_49: state->f = state->fbase; state->needf = ae_false; lbl_46: /* * Back to MCSRCH * * NOTE: penalty term from correction is added to FN in order * to penalize increase in infeasibility. */ smoothnessmonitorenqueuepoint1u(&state->smonitor, &state->s, &state->invs, &state->d, state->stp, &state->x, state->f, &state->g, _state); state->fn = state->f+minbleic_penaltyfactor*state->maxscaledgrad*penalty; ae_v_move(&state->cgn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); sasconstraineddirection(&state->sas, &state->cgn, _state); trimfunction(&state->fn, &state->cgn, n, state->trimthreshold, _state); mcsrch(n, &state->xn, &state->fn, &state->ugn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); goto lbl_43; lbl_44: ae_v_moveneg(&state->bufsk.ptr.pp_double[state->bufsize][0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_moveneg(&state->bufyk.ptr.pp_double[state->bufsize][0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->bufsk.ptr.pp_double[state->bufsize][0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->bufyk.ptr.pp_double[state->bufsize][0], 1, &state->cgn.ptr.p_double[0], 1, ae_v_len(0,n-1)); smoothnessmonitorfinalizelinesearch(&state->smonitor, _state); /* * Check for presence of NAN/INF in function/gradient */ v = state->fn; for(i=0; i<=n-1; i++) { v = 0.1*v+state->ugn.ptr.p_double[i]; } if( !ae_isfinite(v, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; goto lbl_40; } /* * Handle possible failure of the line search or request for termination */ if( mcinfo!=1&&mcinfo!=5 ) { /* * We can not find step which decreases function value. We have * two possibilities: * (a) numerical properties of the function do not allow us to * find good step. * (b) we are close to activation of some constraint, and it is * so close that step which activates it leads to change in * target function which is smaller than numerical noise. * * Optimization algorithm must be able to handle case (b), because * inability to handle it will cause failure when algorithm * started very close to boundary of the feasible area. * * In order to correctly handle such cases we allow limited amount * of small steps which increase function value. */ v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->d.ptr.p_double[i]*state->curstpmax/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); b = ae_false; if( (state->cidx>=0&&ae_fp_less_eq(v,minbleic_maxnonmonotoniclen))&&state->nonmonotoniccnt>0 ) { /* * We try to enforce non-monotonic step: * * Stp := CurStpMax * * MCINFO := 5 * * XN := XC+CurStpMax*D * * non-monotonic counter is decreased * * NOTE: UGN/CGN are not updated because step is so short that we assume that * GN is approximately equal to GC. 
* * NOTE: prior to enforcing such step we check that it does not increase infeasibility * of constraints beyond tolerable level */ v = state->curstpmax; ae_v_move(&state->tmp0.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_addd(&state->tmp0.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), v); if( ae_fp_less_eq(minbleic_feasibilityerror(&state->tmp0, &state->s, n, &state->cleic, state->nec, state->nic, _state),minbleic_nmstol*ae_sqrt((double)(n), _state)*ae_machineepsilon) ) { state->stp = state->curstpmax; mcinfo = 5; ae_v_move(&state->xn.ptr.p_double[0], 1, &state->tmp0.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->nonmonotoniccnt = state->nonmonotoniccnt-1; b = ae_true; } } if( !b ) { /* * Numerical properties of the function do not allow * us to solve problem. Here we have two possibilities: * * if it is "steepest descent" step, we can terminate * algorithm because we are close to minimum * * if it is NOT "steepest descent" step, we should restart * LBFGS iterations. */ if( state->steepestdescentstep ) { /* * Algorithm is terminated */ state->repterminationtype = 7; goto lbl_40; } else { /* * Re-evaluate active set and restart LBFGS */ goto lbl_40; } } } if( state->userterminationneeded ) { goto lbl_40; } /* * Current point is updated: * * move XC/FC/GC to XP/FP/GP * * change current point remembered by SAS structure * * move XN/FN/GN to XC/FC/GC * * report current point and update iterations counter * * if MCINFO=1, push new pair SK/YK to LBFGS buffer */ state->fp = state->fc; ae_v_move(&state->xp.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fc = state->fn; ae_v_move(&state->cgc.ptr.p_double[0], 1, &state->cgn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugc.ptr.p_double[0], 1, &state->ugn.ptr.p_double[0], 1, ae_v_len(0,n-1)); actstatus = sasmoveto(&state->sas, &state->xn, state->cidx>=0&&ae_fp_greater_eq(state->stp,state->activationstep), state->cidx, state->cval, _state); if( !state->xrep ) { goto lbl_52; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbleic_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 21; goto lbl_rcomm; lbl_21: state->xupdated = ae_false; lbl_52: state->repinneriterationscount = state->repinneriterationscount+1; if( mcinfo==1 ) { /* * Accept new LBFGS update given by Sk,Yk */ if( state->bufsize==m ) { /* * Buffer is full, shift contents by one row */ for(i=0; i<=state->bufsize-1; i++) { ae_v_move(&state->bufsk.ptr.pp_double[i][0], 1, &state->bufsk.ptr.pp_double[i+1][0], 1, ae_v_len(0,n-1)); ae_v_move(&state->bufyk.ptr.pp_double[i][0], 1, &state->bufyk.ptr.pp_double[i+1][0], 1, ae_v_len(0,n-1)); } for(i=0; i<=state->bufsize-2; i++) { state->bufrho.ptr.p_double[i] = state->bufrho.ptr.p_double[i+1]; state->buftheta.ptr.p_double[i] = state->buftheta.ptr.p_double[i+1]; } } else { /* * Buffer is not full, increase buffer size by 1 */ state->bufsize = state->bufsize+1; } v = ae_v_dotproduct(&state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, &state->bufsk.ptr.pp_double[state->bufsize-1][0], 1, ae_v_len(0,n-1)); vv = ae_v_dotproduct(&state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, &state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, ae_v_len(0,n-1)); if( ae_fp_eq(v,(double)(0))||ae_fp_eq(vv,(double)(0)) ) { /* * Strange internal error in LBFGS - either YK=0 * (which should not have been) or (SK,YK)=0 (again, * unexpected). 
It should not take place because * MCINFO=1, which signals "good" step. But just * to be sure we have special branch of code which * restarts LBFGS */ goto lbl_40; } state->bufrho.ptr.p_double[state->bufsize-1] = 1/v; ae_assert(state->bufsize<=m, "MinBLEIC: internal error", _state); /* * Update length of the good step */ v = (double)(0); vv = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr((state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); vv = vv+ae_sqr(state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i], _state); } state->lastgoodstep = ae_sqrt(vv, _state); minbleic_updateestimateofgoodstep(&state->lastscaledgoodstep, ae_sqrt(v, _state), _state); } /* * Check stopping criteria * * Step size and function-based stopping criteria are tested only * for step which satisfies Wolfe conditions and is the first step of * LBFGS (subsequent steps may accumulate active constraints thus * they should NOT be used for stopping; step size or function change * may be small when constrained, but these constraints may be * deactivated by the subsequent steps). * * MaxIts-based stopping condition is checked for all kinds of steps. */ if( mcinfo==1&&state->steepestdescentstep ) { /* * Step is small enough */ v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr((state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( ae_fp_less_eq(v,state->epsx) ) { state->repterminationtype = 2; goto lbl_40; } /* * Function change is small enough */ if( ae_fp_less_eq(ae_fabs(state->fp-state->fc, _state),state->epsf*ae_maxreal(ae_fabs(state->fc, _state), ae_maxreal(ae_fabs(state->fp, _state), 1.0, _state), _state)) ) { state->repterminationtype = 1; goto lbl_40; } } if( state->maxits>0&&state->repinneriterationscount>=state->maxits ) { state->repterminationtype = 5; goto lbl_40; } /* * Clear "steepest descent" flag. */ state->steepestdescentstep = ae_false; /* * Smooth reset (LBFGS memory model is refreshed) or hard restart: * * LBFGS model is refreshed, if line search was performed with activation of constraints * * algorithm is restarted if scaled gradient decreased below GDecay */ if( actstatus>=0 ) { state->bufsize = 0; goto lbl_39; } v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->cgc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( ae_fp_less(ae_sqrt(v, _state),gdecay*ginit) ) { goto lbl_40; } goto lbl_39; lbl_40: if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; goto lbl_31; } if( state->repterminationtype!=0 ) { /* * Algorithm terminated */ goto lbl_31; } /* * Decrease decay coefficient. Subsequent L-BFGS stages will * have more stringent stopping criteria. 
*/ gdecay = ae_maxreal(gdecay*minbleic_decaycorrection, minbleic_mindecay, _state); goto lbl_30; lbl_31: sasstopoptimization(&state->sas, _state); state->repouteriterationscount = 1; result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = m; state->rstate.ia.ptr.p_int[2] = i; state->rstate.ia.ptr.p_int[3] = j; state->rstate.ia.ptr.p_int[4] = mcinfo; state->rstate.ia.ptr.p_int[5] = actstatus; state->rstate.ia.ptr.p_int[6] = itidx; state->rstate.ba.ptr.p_bool[0] = b; state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = vv; state->rstate.ra.ptr.p_double[2] = v0; state->rstate.ra.ptr.p_double[3] = penalty; state->rstate.ra.ptr.p_double[4] = ginit; state->rstate.ra.ptr.p_double[5] = gdecay; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minbleicoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minbleicsetscale(). 
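A typical way to enable the check (an illustrative sketch only; it assumes the
C++ interface wrappers of the same names and a user-written gradient callback
my_grad, neither of which is defined here):

    alglib::real_1d_array x = "[1.0, 1.0]";
    alglib::minbleicstate state;
    alglib::minbleiccreate(x, state);
    alglib::minbleicsetcond(state, 0, 0, 1.0e-6, 0);
    alglib::minbleicoptguardgradient(state, 1.0e-3);   // TestStep = 0.001
    alglib::minbleicoptimize(state, my_grad);

After the optimization session is over, the report is retrieved with
minbleicoptguardresults() and examined (see below).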
=== EXPLANATION ==========================================================

In order to verify the gradient, the algorithm performs the following steps:
  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
    where X[i] is the i-th component of the initial point and S[i] is the
    scale of the i-th parameter
  * F(X) is evaluated at these trial points
  * we perform one more evaluation in the middle point of the interval
  * we build a cubic model using function values and derivatives at the
    trial points and we compare its prediction with the actual value at the
    middle point

  -- ALGLIB --
     Copyright 15.06.2014 by Bochkanov Sergey
*************************************************************************/
void minbleicoptguardgradient(minbleicstate* state, double teststep, ae_state *_state)
{
    ae_assert(ae_isfinite(teststep, _state), "MinBLEICOptGuardGradient: TestStep contains NaN or INF", _state);
    ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinBLEICOptGuardGradient: invalid argument TestStep(TestStep<0)", _state);
    state->teststep = teststep;
}


/*************************************************************************
This function activates/deactivates the nonsmoothness monitoring option of
the OptGuard integrity checker. The smoothness monitor silently observes
the solution process and tries to detect ill-posed problems, i.e. ones
with:
a) discontinuous target function (non-C0)
b) nonsmooth target function (non-C1)

Smoothness monitoring does NOT interrupt optimization even if it suspects
that your problem is nonsmooth. It just sets the corresponding flags in the
OptGuard report which can be retrieved after optimization is over.

Smoothness monitoring is a moderate-overhead option which often adds less
than 1% to the optimizer running time. Thus, you can use it even for large
scale problems.

NOTE: OptGuard does NOT guarantee that it will always detect C0/C1
      continuity violations.

      First, minor errors are hard to catch - say, a 0.0001 difference in
      the model values at two sides of the gap may be due to discontinuity
      of the model - or simply because the model has changed.

      Second, C1-violations are especially difficult to detect in a
      noninvasive way. The optimizer usually performs very short steps near
      the nonsmoothness, and differentiation usually introduces a lot of
      numerical noise. It is hard to tell whether some tiny discontinuity
      in the slope is due to real nonsmoothness or just due to numerical
      noise alone.

      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted with a restart from a different initial point).

INPUT PARAMETERS:
    state   -   algorithm state
    level   -   monitoring level:
                * 0 - monitoring is disabled
                * 1 - noninvasive low-overhead monitoring; function values
                      and/or gradients are recorded, but OptGuard does not
                      try to perform additional evaluations in order to get
                      more information about suspicious locations.

=== EXPLANATION ==========================================================

One major source of headache during optimization is the possibility of
coding errors in the target function/constraints (or their gradients).
Such errors most often manifest themselves as discontinuity or
nonsmoothness of the target/constraints.

Another frequent situation is when you try to optimize something involving
lots of min() and max() operations, i.e. a nonsmooth target. Although not a
coding error, it is nonsmoothness anyway - and smooth optimizers usually
stop right after encountering nonsmoothness, well before reaching the
solution.
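For example, a target as innocuous-looking as the following sketch (a
hypothetical user callback using fmax() from math.h, shown only to
illustrate the point) is non-C1 along the set where the two arguments of
fmax() are equal, and a smooth optimizer may stall exactly there:

    void my_func(const double *x, double *f)
    {
        *f = fmax(x[0]*x[0], 1.0 - x[1]);   // kink where x[0]^2 == 1-x[1]
    }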
OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to detect
errors. Upon discovering a suspicious pair of points it raises the
appropriate flag (and allows you to continue optimization). When
optimization is done, you can study the OptGuard result.

  -- ALGLIB --
     Copyright 21.11.2018 by Bochkanov Sergey
*************************************************************************/
void minbleicoptguardsmoothness(minbleicstate* state, ae_int_t level, ae_state *_state)
{
    ae_assert(level==0||level==1, "MinBLEICOptGuardSmoothness: unexpected value of level parameter", _state);
    state->smoothnessguardlevel = level;
}


/*************************************************************************
Results of OptGuard integrity check; this function should be called after
the optimization session is over.

=== PRIMARY REPORT =======================================================

OptGuard performs several checks which are intended to catch common errors
in the implementation of the nonlinear function/gradient:
* incorrect analytic gradient
* discontinuous (non-C0) target functions (constraints)
* nonsmooth (non-C1) target functions (constraints)

Each of these checks is activated with the appropriate function:
* minbleicoptguardgradient() for gradient verification
* minbleicoptguardsmoothness() for C0/C1 checks

Following flags are set when these errors are suspected:
* rep.badgradsuspected, and additionally:
  * rep.badgradvidx for the specific variable (gradient element) suspected
  * rep.badgradxbase, a point where the gradient is tested
  * rep.badgraduser, user-provided gradient (stored as a 2D matrix with a
    single row in order to make the report structure compatible with more
    complex optimizers like MinNLC or MinLM)
  * rep.badgradnum, reference gradient obtained via numerical
    differentiation (stored as a 2D matrix with a single row in order to
    make the report structure compatible with more complex optimizers like
    MinNLC or MinLM)
* rep.nonc0suspected
* rep.nonc1suspected

=== ADDITIONAL REPORTS/LOGS ==============================================

Several different tests are performed to catch C0/C1 errors; you can find
out which specific test signaled the error by looking at:
* rep.nonc0test0positive, for non-C0 test #0
* rep.nonc1test0positive, for non-C1 test #0
* rep.nonc1test1positive, for non-C1 test #1

Additional information (including line search logs) can be obtained by
means of:
* minbleicoptguardnonc1test0results()
* minbleicoptguardnonc1test1results()
which return detailed error reports, specific points where discontinuities
were found, and so on.

==========================================================================

INPUT PARAMETERS:
    state   -   algorithm state

OUTPUT PARAMETERS:
    rep     -   generic OptGuard report; more detailed reports can be
                retrieved with other functions.

NOTE: false negatives (nonsmooth problems are not identified as nonsmooth
      ones) are possible although unlikely.

      The reason is that you need to make several evaluations around the
      nonsmoothness in order to accumulate enough information about the
      function curvature. Say, if you start right from the nonsmooth point,
      the optimizer simply won't get enough data to understand what is
      going wrong before it terminates due to abrupt changes in the
      derivative. It is also possible that an "unlucky" step will move us
      to termination too quickly.

      Our current approach is to have less than 0.1% false negatives in
      our test examples (measured with multiple restarts from random
      points), and to have exactly 0% false positives.
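A minimal reading of the report might look as follows (a sketch which
assumes the C++ interface wrapper of the same name and printf() for
output; the field names are those documented above):

    alglib::optguardreport ogrep;
    alglib::minbleicoptguardresults(state, ogrep);
    if( ogrep.nonc0suspected )
        printf("target seems discontinuous (non-C0), fidx=%d\n", (int)ogrep.nonc0fidx);
    if( ogrep.nonc1suspected )
        printf("target seems nonsmooth (non-C1), fidx=%d\n", (int)ogrep.nonc1fidx);
    if( ogrep.badgradsuspected )
        printf("analytic gradient disagrees with numerical one at variable %d\n",
               (int)ogrep.badgradvidx);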
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardresults(minbleicstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbleicoptguardnonc1test0results(minbleicstate* state, optguardnonc1test0report* strrep, optguardnonc1test0report* lngrep, ae_state *_state) { _optguardnonc1test0report_clear(strrep); _optguardnonc1test0report_clear(lngrep); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. 
Two reports are returned:
* a "strongest" one, corresponding to the line search which had the highest
  value of the nonsmoothness indicator
* a "longest" one, corresponding to the line search which had more function
  evaluations, and thus is more detailed

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty).
* vidx - is an index of the variable in [0,N) with nonsmooth derivative
* x0[], d[] - arrays of length N which store the initial point and the
  direction for the line search (d[] can be normalized, but does not have
  to be)
* stp[], g[] - arrays of length CNT which store step lengths and gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d and contains the
  vidx-th component of the gradient.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).

==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it - you will
=                   see where C1 continuity is violated.
==========================================================================

INPUT PARAMETERS:
    state   -   algorithm state

OUTPUT PARAMETERS:
    strrep  -   C1 test #1 "strong" report
    lngrep  -   C1 test #1 "long" report

  -- ALGLIB --
     Copyright 21.11.2018 by Bochkanov Sergey
*************************************************************************/
void minbleicoptguardnonc1test1results(minbleicstate* state, optguardnonc1test1report* strrep, optguardnonc1test1report* lngrep, ae_state *_state)
{
    _optguardnonc1test1report_clear(strrep);
    _optguardnonc1test1report_clear(lngrep);
    smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1strrep, &state->lastscaleused, strrep, _state);
    smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1lngrep, &state->lastscaleused, lngrep, _state);
}


/*************************************************************************
BLEIC results

INPUT PARAMETERS:
    State   -   algorithm state

OUTPUT PARAMETERS:
    X       -   array[0..N-1], solution
    Rep     -   optimization report. You should check Rep.TerminationType
                in order to distinguish successful termination from an
                unsuccessful one:
                * -8    internal integrity control detected infinite or
                        NAN values in function/gradient. Abnormal
                        termination signalled.
                * -3    inconsistent constraints. Feasible point is either
                        nonexistent or too hard to find. Try to restart
                        the optimizer with a better initial approximation
                *  1    relative function improvement is no more than EpsF.
                *  2    scaled step is no more than EpsX.
                *  4    scaled gradient norm is no more than EpsG.
                *  5    MaxIts steps were taken
                *  8    terminated by the user who called
                        minbleicrequesttermination(). X contains the point
                        which was "current accepted" when the termination
                        request was submitted.
                More information about the fields of this structure can be
                found in the comments on the MinBLEICReport datatype.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicresults(minbleicstate* state, /* Real    */ ae_vector* x, minbleicreport* rep, ae_state *_state)
{
    ae_vector_clear(x);
    _minbleicreport_clear(rep);
    minbleicresultsbuf(state, x, rep, _state);
}


/*************************************************************************
BLEIC results

Buffered implementation of MinBLEICResults() which uses a pre-allocated
buffer to store X[].
If the buffer size is too small, the buffer is resized. It is intended to
be used in the inner cycles of performance critical algorithms where the
array reallocation penalty is too large to be ignored.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicresultsbuf(minbleicstate* state, /* Real    */ ae_vector* x, minbleicreport* rep, ae_state *_state)
{
    ae_int_t i;

    if( x->cnt<state->nmain )
    {
        ae_vector_set_length(x, state->nmain, _state);
    }
    rep->iterationscount = state->repinneriterationscount;
    rep->inneriterationscount = state->repinneriterationscount;
    rep->outeriterationscount = state->repouteriterationscount;
    rep->nfev = state->repnfev;
    rep->varidx = state->repvaridx;
    rep->terminationtype = state->repterminationtype;
    if( state->repterminationtype>0 )
    {
        ae_v_move(&x->ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,state->nmain-1));
    }
    else
    {
        for(i=0; i<=state->nmain-1; i++)
        {
            x->ptr.p_double[i] = _state->v_nan;
        }
    }
    rep->debugeqerr = state->repdebugeqerr;
    rep->debugfs = state->repdebugfs;
    rep->debugff = state->repdebugff;
    rep->debugdx = state->repdebugdx;
    rep->debugfeasqpits = state->repdebugfeasqpits;
    rep->debugfeasgpaits = state->repdebugfeasgpaits;
}


/*************************************************************************
This subroutine restarts the algorithm from a new point. All optimization
parameters (including constraints) are left unchanged.

This function allows solving multiple optimization problems (which must
have the same number of dimensions) without object reallocation penalty.

INPUT PARAMETERS:
    State   -   structure previously allocated with MinBLEICCreate call.
    X       -   new starting point.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicrestartfrom(minbleicstate* state, /* Real    */ ae_vector* x, ae_state *_state)
{
    ae_int_t n;

    n = state->nmain;

    /*
     * First, check for errors in the inputs
     */
    ae_assert(x->cnt>=n, "MinBLEICRestartFrom: Length(X)<N", _state);
    ae_assert(isfinitevector(x, n, _state), "MinBLEICRestartFrom: X contains infinite or NaN values!", _state);

    /*
     * Set the new starting point
     */
    ae_v_move(&state->xstart.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1));

    /*
     * prepare RComm facilities
     */
    ae_vector_set_length(&state->rstate.ia, 6+1, _state);
    ae_vector_set_length(&state->rstate.ba, 0+1, _state);
    ae_vector_set_length(&state->rstate.ra, 5+1, _state);
    state->rstate.stage = -1;
    minbleic_clearrequestfields(state, _state);
    sasstopoptimization(&state->sas, _state);
}


/*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from the user-supplied callback when the user decides
that it is time to "smoothly" terminate the optimization process. As a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns error code 8 (successful
termination).

INPUT PARAMETERS:
    State   -   optimizer structure

NOTE: after the request for termination the optimizer may perform several
      additional calls to user-supplied callbacks. It does NOT guarantee
      to stop immediately - it just guarantees that these additional calls
      will be discarded later.

NOTE: calling this function on an optimizer which is NOT running will have
      no effect.

NOTE: multiple calls to this function are possible. The first call is
      counted, subsequent calls are silently ignored.
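A possible usage pattern (an illustrative sketch only; it assumes the C++
interface wrappers of the same names, and the optimizer state is made
reachable from the callback through a file-scope pointer, which is purely a
convention of this sketch and not required by the library):

    static alglib::minbleicstate *g_state = NULL;   // hypothetical global
    static volatile bool          g_stop  = false;  // set from elsewhere, e.g. a GUI thread

    void my_grad(const alglib::real_1d_array &x, double &f, alglib::real_1d_array &g, void *ptr)
    {
        f = x[0]*x[0] + x[1]*x[1];
        g[0] = 2*x[0];
        g[1] = 2*x[1];
        if( g_stop && g_state!=NULL )
            alglib::minbleicrequesttermination(*g_state);  // optimizer stops with code 8
    }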
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minbleicrequesttermination(minbleicstate* state, ae_state *_state) { state->userterminationneeded = ae_true; } /************************************************************************* This subroutine finalizes internal structures after emergency termination from State.LSStart report (see comments on MinBLEICState for more information). INPUT PARAMETERS: State - structure after exit from LSStart report -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicemergencytermination(minbleicstate* state, ae_state *_state) { sasstopoptimization(&state->sas, _state); } /************************************************************************* Clears request fileds (to be sure that we don't forget to clear something) *************************************************************************/ static void minbleic_clearrequestfields(minbleicstate* state, ae_state *_state) { state->needf = ae_false; state->needfg = ae_false; state->xupdated = ae_false; state->lsstart = ae_false; } /************************************************************************* Internal initialization subroutine *************************************************************************/ static void minbleic_minbleicinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minbleicstate* state, ae_state *_state) { ae_frame _frame_block; ae_int_t i; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); /* * Initialize */ state->teststep = (double)(0); state->smoothnessguardlevel = 0; smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state); state->nmain = n; state->diffstep = diffstep; sasinit(n, &state->sas, _state); ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->hasbndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->hasbndu, n, _state); ae_vector_set_length(&state->xstart, n, _state); ae_vector_set_length(&state->cgc, n, _state); ae_vector_set_length(&state->ugc, n, _state); ae_vector_set_length(&state->xn, n, _state); ae_vector_set_length(&state->cgn, n, _state); ae_vector_set_length(&state->ugn, n, _state); ae_vector_set_length(&state->xp, n, _state); ae_vector_set_length(&state->d, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->invs, n, _state); ae_vector_set_length(&state->lastscaleused, n, _state); ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->g, n, _state); ae_vector_set_length(&state->work, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = _state->v_neginf; state->hasbndl.ptr.p_bool[i] = ae_false; state->bndu.ptr.p_double[i] = _state->v_posinf; state->hasbndu.ptr.p_bool[i] = ae_false; state->s.ptr.p_double[i] = 1.0; state->invs.ptr.p_double[i] = 1.0; state->lastscaleused.ptr.p_double[i] = 1.0; } minbleicsetlc(state, &c, &ct, 0, _state); minbleicsetcond(state, 0.0, 0.0, 0.0, 0, _state); minbleicsetxrep(state, ae_false, _state); minbleicsetdrep(state, ae_false, _state); minbleicsetstpmax(state, 0.0, _state); minbleicsetprecdefault(state, _state); minbleicrestartfrom(state, x, _state); ae_frame_leave(_state); } 
/*************************************************************************
This subroutine updates the estimate of the good step length given:
1) the previous estimate
2) the new length of the good step

It makes sure that the estimate does not change too rapidly - the ratio of
the new and old estimates will be at least 0.01 and at most 100.0.

In case the previous estimate of the good step is zero (no estimate), the
new estimate is used unconditionally.

  -- ALGLIB --
     Copyright 16.01.2013 by Bochkanov Sergey
*************************************************************************/
static void minbleic_updateestimateofgoodstep(double* estimate, double newstep, ae_state *_state)
{
    if( ae_fp_eq(*estimate,(double)(0)) )
    {
        *estimate = newstep;
        return;
    }
    if( ae_fp_less(newstep,*estimate*0.01) )
    {
        *estimate = *estimate*0.01;
        return;
    }
    if( ae_fp_greater(newstep,*estimate*100) )
    {
        *estimate = *estimate*100;
        return;
    }
    *estimate = newstep;
}


/*************************************************************************
This subroutine estimates the relative feasibility error of a point.

INPUT PARAMETERS:
    X       -   current point (unscaled)
    S       -   scale vector
    N       -   dimensionality
    CLEIC   -   array[NEC+NIC,N+1], constraint matrix, may be unnormalized
    NEC     -   number of equality constraints (come first)
    NIC     -   number of inequality constraints (come last)

RESULT
    feasibility error, a good value is ~1E-16...1E-14

  -- ALGLIB --
     Copyright 16.01.2013 by Bochkanov Sergey
*************************************************************************/
static double minbleic_feasibilityerror(/* Real    */ ae_vector* x,
     /* Real    */ ae_vector* s,
     ae_int_t n,
     /* Real    */ ae_matrix* cleic,
     ae_int_t nec,
     ae_int_t nic,
     ae_state *_state)
{
    ae_int_t i;
    ae_int_t j;
    double v;
    double v0;
    double v1;
    double vc;
    double vx;
    double result;

    result = (double)(0);
    for(i=0; i<=nec+nic-1; i++)
    {
        v = -cleic->ptr.pp_double[i][n];
        v0 = (double)(0);
        v1 = (double)(0);
        for(j=0; j<=n-1; j++)
        {
            vc = cleic->ptr.pp_double[i][j]*s->ptr.p_double[j];
            vx = x->ptr.p_double[j]/s->ptr.p_double[j];
            v = v+vc*vx;
            v0 = v0+ae_sqr(vc, _state);
            v1 = v1+ae_sqr(vx, _state);
        }
        v0 = coalesce(ae_sqrt(v0, _state), (double)(1), _state);
        v1 = ae_maxreal(ae_sqrt(v1, _state), (double)(1), _state);
        if( i<nec )
        {
            /*
             * Equality constraint: any nonzero residual counts as error
             */
            result = ae_maxreal(result, ae_fabs(v, _state)/(v0*v1), _state);
        }
        else
        {
            /*
             * Inequality constraint: only positive residual counts as error
             */
            result = ae_maxreal(result, ae_maxreal(v, (double)(0), _state)/(v0*v1), _state);
        }
    }
    return result;
}


void _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic)
{
    minbleicstate *p = (minbleicstate*)_p;

    ae_touch_ptr((void*)p);
    _sactiveset_init(&p->sas, _state, make_automatic);
    ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic);
    _rcommstate_init(&p->rstate, _state, make_automatic);
    ae_vector_init(&p->ugc, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->cgc, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->ugn, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->cgn, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic);
    ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xstart, 0, DT_REAL, _state, make_automatic);
    _snnlssolver_init(&p->solver, _state, make_automatic);
    ae_vector_init(&p->tmpprec, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->work, 0, DT_REAL, _state,
make_automatic); _linminstate_init(&p->lstate, _state, make_automatic); ae_matrix_init(&p->bufyk, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufsk, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufrho, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->buftheta, 0, DT_REAL, _state, make_automatic); _smoothnessmonitor_init(&p->smonitor, _state, make_automatic); ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invs, 0, DT_REAL, _state, make_automatic); } void _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minbleicstate *dst = (minbleicstate*)_dst; minbleicstate *src = (minbleicstate*)_src; dst->nmain = src->nmain; dst->nslack = src->nslack; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->xrep = src->xrep; dst->drep = src->drep; dst->stpmax = src->stpmax; dst->diffstep = src->diffstep; _sactiveset_init_copy(&dst->sas, &src->sas, _state, make_automatic); ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); dst->prectype = src->prectype; ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic); ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->needf = src->needf; dst->needfg = src->needfg; dst->xupdated = src->xupdated; dst->lsstart = src->lsstart; dst->steepestdescentstep = src->steepestdescentstep; dst->boundedstep = src->boundedstep; dst->userterminationneeded = src->userterminationneeded; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); ae_vector_init_copy(&dst->ugc, &src->ugc, _state, make_automatic); ae_vector_init_copy(&dst->cgc, &src->cgc, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->ugn, &src->ugn, _state, make_automatic); ae_vector_init_copy(&dst->cgn, &src->cgn, _state, make_automatic); ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic); dst->fc = src->fc; dst->fn = src->fn; dst->fp = src->fp; ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic); dst->nec = src->nec; dst->nic = src->nic; dst->lastgoodstep = src->lastgoodstep; dst->lastscaledgoodstep = src->lastscaledgoodstep; dst->maxscaledgrad = src->maxscaledgrad; ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repnfev = src->repnfev; dst->repvaridx = src->repvaridx; dst->repterminationtype = src->repterminationtype; dst->repdebugeqerr = src->repdebugeqerr; dst->repdebugfs = src->repdebugfs; dst->repdebugff = src->repdebugff; dst->repdebugdx = src->repdebugdx; dst->repdebugfeasqpits = src->repdebugfeasqpits; dst->repdebugfeasgpaits = src->repdebugfeasgpaits; ae_vector_init_copy(&dst->xstart, &src->xstart, _state, make_automatic); _snnlssolver_init_copy(&dst->solver, &src->solver, _state, make_automatic); dst->fbase = src->fbase; dst->fm2 = src->fm2; dst->fm1 = src->fm1; dst->fp1 = src->fp1; dst->fp2 = src->fp2; dst->xm1 = src->xm1; dst->xp1 = src->xp1; dst->gm1 = src->gm1; dst->gp1 = src->gp1; 
dst->cidx = src->cidx; dst->cval = src->cval; ae_vector_init_copy(&dst->tmpprec, &src->tmpprec, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); dst->nfev = src->nfev; dst->mcstage = src->mcstage; dst->stp = src->stp; dst->curstpmax = src->curstpmax; dst->activationstep = src->activationstep; ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic); _linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic); dst->trimthreshold = src->trimthreshold; dst->nonmonotoniccnt = src->nonmonotoniccnt; ae_matrix_init_copy(&dst->bufyk, &src->bufyk, _state, make_automatic); ae_matrix_init_copy(&dst->bufsk, &src->bufsk, _state, make_automatic); ae_vector_init_copy(&dst->bufrho, &src->bufrho, _state, make_automatic); ae_vector_init_copy(&dst->buftheta, &src->buftheta, _state, make_automatic); dst->bufsize = src->bufsize; dst->teststep = src->teststep; dst->smoothnessguardlevel = src->smoothnessguardlevel; _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic); ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic); ae_vector_init_copy(&dst->invs, &src->invs, _state, make_automatic); } void _minbleicstate_clear(void* _p) { minbleicstate *p = (minbleicstate*)_p; ae_touch_ptr((void*)p); _sactiveset_clear(&p->sas); ae_vector_clear(&p->s); ae_vector_clear(&p->diagh); ae_vector_clear(&p->x); ae_vector_clear(&p->g); _rcommstate_clear(&p->rstate); ae_vector_clear(&p->ugc); ae_vector_clear(&p->cgc); ae_vector_clear(&p->xn); ae_vector_clear(&p->ugn); ae_vector_clear(&p->cgn); ae_vector_clear(&p->xp); ae_vector_clear(&p->d); ae_matrix_clear(&p->cleic); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->xstart); _snnlssolver_clear(&p->solver); ae_vector_clear(&p->tmpprec); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->work); _linminstate_clear(&p->lstate); ae_matrix_clear(&p->bufyk); ae_matrix_clear(&p->bufsk); ae_vector_clear(&p->bufrho); ae_vector_clear(&p->buftheta); _smoothnessmonitor_clear(&p->smonitor); ae_vector_clear(&p->lastscaleused); ae_vector_clear(&p->invs); } void _minbleicstate_destroy(void* _p) { minbleicstate *p = (minbleicstate*)_p; ae_touch_ptr((void*)p); _sactiveset_destroy(&p->sas); ae_vector_destroy(&p->s); ae_vector_destroy(&p->diagh); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); _rcommstate_destroy(&p->rstate); ae_vector_destroy(&p->ugc); ae_vector_destroy(&p->cgc); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->ugn); ae_vector_destroy(&p->cgn); ae_vector_destroy(&p->xp); ae_vector_destroy(&p->d); ae_matrix_destroy(&p->cleic); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->xstart); _snnlssolver_destroy(&p->solver); ae_vector_destroy(&p->tmpprec); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->work); _linminstate_destroy(&p->lstate); ae_matrix_destroy(&p->bufyk); ae_matrix_destroy(&p->bufsk); ae_vector_destroy(&p->bufrho); ae_vector_destroy(&p->buftheta); _smoothnessmonitor_destroy(&p->smonitor); ae_vector_destroy(&p->lastscaleused); ae_vector_destroy(&p->invs); } void _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minbleicreport *p = (minbleicreport*)_p; ae_touch_ptr((void*)p); } void _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minbleicreport *dst = (minbleicreport*)_dst; minbleicreport *src = 
(minbleicreport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->varidx = src->varidx; dst->terminationtype = src->terminationtype; dst->debugeqerr = src->debugeqerr; dst->debugfs = src->debugfs; dst->debugff = src->debugff; dst->debugdx = src->debugdx; dst->debugfeasqpits = src->debugfeasqpits; dst->debugfeasgpaits = src->debugfeasgpaits; dst->inneriterationscount = src->inneriterationscount; dst->outeriterationscount = src->outeriterationscount; } void _minbleicreport_clear(void* _p) { minbleicreport *p = (minbleicreport*)_p; ae_touch_ptr((void*)p); } void _minbleicreport_destroy(void* _p) { minbleicreport *p = (minbleicreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This function initializes QPBLEICSettings structure with default settings. Newly created structure MUST be initialized by default settings - or by copy of the already initialized structure. -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qpbleicloaddefaults(ae_int_t nmain, qpbleicsettings* s, ae_state *_state) { s->epsg = 0.0; s->epsf = 0.0; s->epsx = 1.0E-6; s->maxits = 0; } /************************************************************************* This function initializes QPBLEICSettings structure with copy of another, already initialized structure. -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qpbleiccopysettings(qpbleicsettings* src, qpbleicsettings* dst, ae_state *_state) { dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; } /************************************************************************* This function runs QPBLEIC solver; it returns after optimization process was completed. Following QP problem is solved: min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) subject to boundary constraints. INPUT PARAMETERS: AC - for dense problems (AKind=0), A-term of CQM object contains system matrix. Other terms are unspecified and should not be referenced. SparseAC - for sparse problems (AKind=1 AKind - sparse matrix format: * 0 for dense matrix * 1 for sparse matrix SparseUpper - which triangle of SparseAC stores matrix - upper or lower one (for dense matrices this parameter is not actual). AbsASum - SUM(|A[i,j]|) AbsASum2 - SUM(A[i,j]^2) BC - linear term, array[NC] BndLC - lower bound, array[NC] BndUC - upper bound, array[NC] SC - scale vector, array[NC]: * I-th element contains scale of I-th variable, * SC[I]>0 XOriginC - origin term, array[NC]. Can be zero. NC - number of variables in the original formulation (no slack variables). CLEICC - linear equality/inequality constraints. Present version of this function does NOT provide publicly available support for linear constraints. This feature will be introduced in the future versions of the function. NEC, NIC - number of equality/inequality constraints. MUST BE ZERO IN THE CURRENT VERSION!!! Settings - QPBLEICSettings object initialized by one of the initialization functions. SState - object which stores temporaries: * if uninitialized object was passed, FirstCall parameter MUST be set to True; object will be automatically initialized by the function, and FirstCall will be set to False. 
* if FirstCall=False, it is assumed that this parameter was already initialized by previous call to this function with same problem dimensions (variable count N). FirstCall - whether it is first call of this function for this specific instance of SState, with this number of variables N specified. XS - initial point, array[NC] OUTPUT PARAMETERS: XS - last point FirstCall - uncondtionally set to False TerminationType-termination type: * * * -- ALGLIB -- Copyright 14.05.2011 by Bochkanov Sergey *************************************************************************/ void qpbleicoptimize(convexquadraticmodel* a, sparsematrix* sparsea, ae_int_t akind, ae_bool sparseaupper, double absasum, double absasum2, /* Real */ ae_vector* b, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* xorigin, ae_int_t n, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, qpbleicsettings* settings, qpbleicbuffers* sstate, ae_bool* firstcall, /* Real */ ae_vector* xs, ae_int_t* terminationtype, ae_state *_state) { ae_int_t i; double d2; double d1; double d0; double v; double v0; double v1; double md; double mx; double mb; ae_int_t d1est; ae_int_t d2est; *terminationtype = 0; ae_assert(akind==0||akind==1, "QPBLEICOptimize: unexpected AKind", _state); sstate->repinneriterationscount = 0; sstate->repouteriterationscount = 0; *terminationtype = 0; /* * Prepare solver object, if needed */ if( *firstcall ) { minbleiccreate(n, xs, &sstate->solver, _state); *firstcall = ae_false; } /* * Prepare max(|B|) */ mb = 0.0; for(i=0; i<=n-1; i++) { mb = ae_maxreal(mb, ae_fabs(b->ptr.p_double[i], _state), _state); } /* * Temporaries */ ivectorsetlengthatleast(&sstate->tmpi, nec+nic, _state); rvectorsetlengthatleast(&sstate->tmp0, n, _state); rvectorsetlengthatleast(&sstate->tmp1, n, _state); for(i=0; i<=nec-1; i++) { sstate->tmpi.ptr.p_int[i] = 0; } for(i=0; i<=nic-1; i++) { sstate->tmpi.ptr.p_int[nec+i] = -1; } minbleicsetlc(&sstate->solver, cleic, &sstate->tmpi, nec+nic, _state); minbleicsetbc(&sstate->solver, bndl, bndu, _state); minbleicsetdrep(&sstate->solver, ae_true, _state); minbleicsetcond(&sstate->solver, ae_minrealnumber, 0.0, 0.0, settings->maxits, _state); minbleicsetscale(&sstate->solver, s, _state); minbleicsetprecscale(&sstate->solver, _state); minbleicrestartfrom(&sstate->solver, xs, _state); while(minbleiciteration(&sstate->solver, _state)) { /* * Line search started */ if( sstate->solver.lsstart ) { /* * Iteration counters: * * inner iterations count is increased on every line search * * outer iterations count is increased only at steepest descent line search */ inc(&sstate->repinneriterationscount, _state); if( sstate->solver.steepestdescentstep ) { inc(&sstate->repouteriterationscount, _state); } /* * Build quadratic model of F along descent direction: * * F(x+alpha*d) = D2*alpha^2 + D1*alpha + D0 * * Calculate estimates of linear and quadratic term * (term magnitude is compared with magnitude of numerical errors) */ d0 = sstate->solver.f; d1 = ae_v_dotproduct(&sstate->solver.d.ptr.p_double[0], 1, &sstate->solver.g.ptr.p_double[0], 1, ae_v_len(0,n-1)); d2 = (double)(0); if( akind==0 ) { d2 = cqmxtadx2(a, &sstate->solver.d, &sstate->tmp0, _state); } if( akind==1 ) { sparsesmv(sparsea, sparseaupper, &sstate->solver.d, &sstate->tmp0, _state); d2 = 0.0; for(i=0; i<=n-1; i++) { d2 = d2+sstate->solver.d.ptr.p_double[i]*sstate->tmp0.ptr.p_double[i]; } d2 = 0.5*d2; } mx = 0.0; md = 0.0; for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, 
ae_fabs(sstate->solver.x.ptr.p_double[i], _state), _state); md = ae_maxreal(md, ae_fabs(sstate->solver.d.ptr.p_double[i], _state), _state); } estimateparabolicmodel(absasum, absasum2, mx, mb, md, d1, d2, &d1est, &d2est, _state); /* * Tests for "normal" convergence. * * This line search may be started from steepest descent * stage (stage 2) or from L-BFGS stage (stage 3) of the * BLEIC algorithm. Depending on stage type, different * checks are performed. * * Say, L-BFGS stage is an equality-constrained refinement * stage of BLEIC. This stage refines current iterate * under "frozen" equality constraints. We can terminate * iterations at this stage only when we encounter * unconstrained direction of negative curvature. In all * other cases (say, when constrained gradient is zero) * we should not terminate algorithm because everything may * change after de-activating presently active constraints. * * Tests for convergence are performed only at "steepest descent" stage * of the BLEIC algorithm, and only when function is non-concave * (D2 is positive or approximately zero) along direction D. * * NOTE: we do not test iteration count (MaxIts) here, because * this stopping condition is tested by BLEIC itself. */ if( sstate->solver.steepestdescentstep&&d2est>=0 ) { if( d1est>=0 ) { /* * "Emergency" stopping condition: D is non-descent direction. * Sometimes it is possible because of numerical noise in the * target function. */ *terminationtype = 4; for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]; } break; } if( d2est>0 ) { /* * Stopping condition #4 - gradient norm is small: * * 1. rescale State.Solver.D and State.Solver.G according to * current scaling, store results to Tmp0 and Tmp1. * 2. Normalize Tmp0 (scaled direction vector). * 3. compute directional derivative (in scaled variables), * which is equal to DOTPRODUCT(Tmp0,Tmp1). */ v = (double)(0); for(i=0; i<=n-1; i++) { sstate->tmp0.ptr.p_double[i] = sstate->solver.d.ptr.p_double[i]/s->ptr.p_double[i]; sstate->tmp1.ptr.p_double[i] = sstate->solver.g.ptr.p_double[i]*s->ptr.p_double[i]; v = v+ae_sqr(sstate->tmp0.ptr.p_double[i], _state); } ae_assert(ae_fp_greater(v,(double)(0)), "QPBLEICOptimize: inernal errror (scaled direction is zero)", _state); v = 1/ae_sqrt(v, _state); ae_v_muld(&sstate->tmp0.ptr.p_double[0], 1, ae_v_len(0,n-1), v); v = ae_v_dotproduct(&sstate->tmp0.ptr.p_double[0], 1, &sstate->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_fp_less_eq(ae_fabs(v, _state),settings->epsg) ) { *terminationtype = 4; for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]; } break; } /* * Stopping condition #1 - relative function improvement is small: * * 1. calculate steepest descent step: V = -D1/(2*D2) * 2. calculate function change: V1= D2*V^2 + D1*V * 3. stop if function change is small enough */ v = -d1/(2*d2); v1 = d2*v*v+d1*v; if( ae_fp_less_eq(ae_fabs(v1, _state),settings->epsf*ae_maxreal(d0, 1.0, _state)) ) { *terminationtype = 1; for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]; } break; } /* * Stopping condition #2 - scaled step is small: * * 1. calculate step multiplier V0 (step itself is D*V0) * 2. calculate scaled step length V * 3. 
stop if step is small enough */ v0 = -d1/(2*d2); v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(v0*sstate->solver.d.ptr.p_double[i]/s->ptr.p_double[i], _state); } if( ae_fp_less_eq(ae_sqrt(v, _state),settings->epsx) ) { *terminationtype = 2; for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]; } break; } } } /* * Test for unconstrained direction of negative curvature */ if( (d2est<0||(d2est==0&&d1est<0))&&!sstate->solver.boundedstep ) { /* * Function is unbounded from below: * * function will decrease along D, i.e. either: * * D2<0 * * D2=0 and D1<0 * * step is unconstrained * * If these conditions are true, we abnormally terminate QP * algorithm with return code -4 (we can do so at any stage * of BLEIC - whether it is L-BFGS or steepest descent one). */ *terminationtype = -4; for(i=0; i<=n-1; i++) { xs->ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]; } break; } /* * Suggest new step (only if D1 is negative far away from zero, * D2 is positive far away from zero). */ if( d1est<0&&d2est>0 ) { sstate->solver.stp = safeminposrv(-d1, 2*d2, sstate->solver.curstpmax, _state); } } /* * Gradient evaluation */ if( sstate->solver.needfg ) { for(i=0; i<=n-1; i++) { sstate->tmp0.ptr.p_double[i] = sstate->solver.x.ptr.p_double[i]-xorigin->ptr.p_double[i]; } if( akind==0 ) { cqmadx(a, &sstate->tmp0, &sstate->tmp1, _state); } if( akind==1 ) { sparsesmv(sparsea, sparseaupper, &sstate->tmp0, &sstate->tmp1, _state); } v0 = ae_v_dotproduct(&sstate->tmp0.ptr.p_double[0], 1, &sstate->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); v1 = ae_v_dotproduct(&sstate->tmp0.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,n-1)); sstate->solver.f = 0.5*v0+v1; ae_v_move(&sstate->solver.g.ptr.p_double[0], 1, &sstate->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&sstate->solver.g.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,n-1)); } } if( *terminationtype==0 ) { /* * BLEIC optimizer was terminated by one of its inner stopping * conditions. Usually it is iteration counter (if such * stopping condition was specified by user). */ minbleicresultsbuf(&sstate->solver, xs, &sstate->solverrep, _state); *terminationtype = sstate->solverrep.terminationtype; } else { /* * BLEIC optimizer was terminated in "emergency" mode by QP * solver. * * NOTE: such termination is "emergency" only when viewed from * BLEIC's position. QP solver sees such termination as * routine one, triggered by QP's stopping criteria. 
*/ minbleicemergencytermination(&sstate->solver, _state); } } void _qpbleicsettings_init(void* _p, ae_state *_state, ae_bool make_automatic) { qpbleicsettings *p = (qpbleicsettings*)_p; ae_touch_ptr((void*)p); } void _qpbleicsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qpbleicsettings *dst = (qpbleicsettings*)_dst; qpbleicsettings *src = (qpbleicsettings*)_src; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; } void _qpbleicsettings_clear(void* _p) { qpbleicsettings *p = (qpbleicsettings*)_p; ae_touch_ptr((void*)p); } void _qpbleicsettings_destroy(void* _p) { qpbleicsettings *p = (qpbleicsettings*)_p; ae_touch_ptr((void*)p); } void _qpbleicbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) { qpbleicbuffers *p = (qpbleicbuffers*)_p; ae_touch_ptr((void*)p); _minbleicstate_init(&p->solver, _state, make_automatic); _minbleicreport_init(&p->solverrep, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpi, 0, DT_INT, _state, make_automatic); } void _qpbleicbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { qpbleicbuffers *dst = (qpbleicbuffers*)_dst; qpbleicbuffers *src = (qpbleicbuffers*)_src; _minbleicstate_init_copy(&dst->solver, &src->solver, _state, make_automatic); _minbleicreport_init_copy(&dst->solverrep, &src->solverrep, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmpi, &src->tmpi, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; } void _qpbleicbuffers_clear(void* _p) { qpbleicbuffers *p = (qpbleicbuffers*)_p; ae_touch_ptr((void*)p); _minbleicstate_clear(&p->solver); _minbleicreport_clear(&p->solverrep); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmpi); } void _qpbleicbuffers_destroy(void* _p) { qpbleicbuffers *p = (qpbleicbuffers*)_p; ae_touch_ptr((void*)p); _minbleicstate_destroy(&p->solver); _minbleicreport_destroy(&p->solverrep); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmpi); } #endif #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* CONSTRAINED QUADRATIC PROGRAMMING The subroutine creates QP optimizer. After initial creation, it contains default optimization problem with zero quadratic and linear terms and no constraints. 
In order to actually solve something you should: * set cost vector with minqpsetlinearterm() * set variable bounds with minqpsetbc() or minqpsetbcall() * specify constraint matrix with one of the following functions: * modern API: * minqpsetlc2() for sparse two-sided constraints AL <= A*x <= AU * minqpsetlc2dense() for dense two-sided constraints AL <= A*x <= AU * minqpsetlc2mixed() for mixed two-sided constraints AL <= A*x <= AU * minqpaddlc2dense() to add one dense row to dense constraint submatrix * minqpaddlc2() to add one sparse row to sparse constraint submatrix * legacy API: * minqpsetlc() for dense one-sided equality/inequality constraints * minqpsetlcsparse() for sparse one-sided equality/inequality constraints * minqpsetlcmixed() for mixed dense/sparse one-sided equality/inequality constraints * choose appropriate QP solver and set it and its stopping criteria by means of minqpsetalgo??????() function * call minqpoptimize() to run the solver and minqpresults() to get the solution vector and additional information. Following solvers are recommended for convex and semidefinite problems: * QuickQP for dense problems with box-only constraints (or no constraints at all) * DENSE-IPM-QP for convex or semidefinite problems with medium (up to several thousands) variable count, dense/sparse quadratic term and any number (up to many thousands) of dense/sparse general linear constraints * SPARSE-IPM-QP for convex or semidefinite problems with large (many thousands) variable count, sparse quadratic term AND linear constraints. If your problem happens to be nonconvex, but either (a) is effectively convexified under constraints, or (b) has unique solution even with nonconvex target, then you can use: * QuickQP for dense nonconvex problems with box-only constraints * DENSE-AUL-QP for dense nonconvex problems which are effectively convexified under constraints with up to several thousands of variables and any (small or large) number of general linear constraints * QP-BLEIC for dense/sparse problems with small (up to several hundreds) number of general linear constraints and arbitrarily large variable count. 
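A minimal end-to-end sketch of the workflow described above, written against the
public ALGLIB C++ wrapper (the alglib namespace); the wrapper types
real_1d_array/real_2d_array/minqpstate/minqpreport are assumptions taken from that
wrapper rather than from this compilation unit, so treat this as an illustrative
sketch and not as normative code:

    // minimize f(x) = 0.5*x'*A*x + b'*x  with  A=2*I, b=(-6,-4),
    // subject to box constraints 0<=x[i]<=2.5 (solution is x=(2.5,2.0))
    alglib::real_2d_array a = "[[2,0],[0,2]]";
    alglib::real_1d_array b = "[-6,-4]";
    alglib::real_1d_array s = "[1,1]";
    alglib::real_1d_array bndl = "[0.0,0.0]";
    alglib::real_1d_array bndu = "[2.5,2.5]";
    alglib::real_1d_array x;
    alglib::minqpstate state;
    alglib::minqpreport rep;
    alglib::minqpcreate(2, state);                    // empty problem with N=2
    alglib::minqpsetquadraticterm(state, a);          // quadratic term
    alglib::minqpsetlinearterm(state, b);             // linear (cost) term
    alglib::minqpsetbc(state, bndl, bndu);            // box constraints
    alglib::minqpsetscale(state, s);                  // stopping criteria are scale-dependent
    alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true); // box-only problem => QuickQP
    alglib::minqpoptimize(state);
    alglib::minqpresults(state, x, rep);              // rep.terminationtype>0 on success
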
INPUT PARAMETERS: N - problem size OUTPUT PARAMETERS: State - optimizer with zero quadratic/linear terms and no constraints -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state) { ae_int_t i; _minqpstate_clear(state); ae_assert(n>=1, "MinQPCreate: N<1", _state); /* * initialize QP solver */ state->n = n; state->mdense = 0; state->msparse = 0; state->repterminationtype = 0; state->absamax = (double)(1); state->absasum = (double)(1); state->absasum2 = (double)(1); state->akind = 0; state->sparseaupper = ae_false; cqminit(n, &state->a, _state); ae_vector_set_length(&state->b, n, _state); ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->havebndl, n, _state); ae_vector_set_length(&state->havebndu, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->startx, n, _state); ae_vector_set_length(&state->xorigin, n, _state); ae_vector_set_length(&state->xs, n, _state); rvectorsetlengthatleast(&state->replagbc, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = _state->v_neginf; state->bndu.ptr.p_double[i] = _state->v_posinf; state->havebndl.ptr.p_bool[i] = ae_false; state->havebndu.ptr.p_bool[i] = ae_false; state->b.ptr.p_double[i] = 0.0; state->startx.ptr.p_double[i] = 0.0; state->xorigin.ptr.p_double[i] = 0.0; state->s.ptr.p_double[i] = 1.0; state->replagbc.ptr.p_double[i] = 0.0; } state->stype = 0; state->havex = ae_false; minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0, _state); qqploaddefaults(n, &state->qqpsettingsuser, _state); qpbleicloaddefaults(n, &state->qpbleicsettingsuser, _state); qpdenseaulloaddefaults(n, &state->qpdenseaulsettingsuser, _state); state->qpbleicfirstcall = ae_true; state->dbgskipconstraintnormalization = ae_false; state->veps = 0.0; } /************************************************************************* This function sets linear term for QP solver. By default, linear term is zero. INPUT PARAMETERS: State - structure which stores algorithm state B - linear term, array[N]. 
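For a short hedged illustration (it assumes the same C++ wrapper types as the
sketch in the minqpcreate() comment above, and minqpsetquadraticterm() which is
defined later in this unit): the QP objective uses the same 0.5*x'*A*x + b'*x
convention as the QPBLEIC driver above, so a one-dimensional objective
f(x) = x^2 + 3*x is passed as A=[[2.0]] and B=[3.0]:

    alglib::real_2d_array a = "[[2.0]]";    // 0.5*2.0*x^2 = x^2
    alglib::real_1d_array b = "[3.0]";      // linear coefficient
    alglib::minqpstate state;
    alglib::minqpcreate(1, state);
    alglib::minqpsetquadraticterm(state, a);
    alglib::minqpsetlinearterm(state, b);
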
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetlinearterm(minqpstate* state, /* Real */ ae_vector* b, ae_state *_state) { ae_int_t n; n = state->n; ae_assert(b->cnt>=n, "MinQPSetLinearTerm: Length(B)n; ae_assert(a->rows>=n, "MinQPSetQuadraticTerm: Rows(A)cols>=n, "MinQPSetQuadraticTerm: Cols(A)n; ae_assert(sparsegetnrows(a, _state)==n, "MinQPSetQuadraticTermSparse: Rows(A)<>N", _state); ae_assert(sparsegetncols(a, _state)==n, "MinQPSetQuadraticTermSparse: Cols(A)<>N", _state); sparsecopytocrsbuf(a, &state->sparsea, _state); state->sparseaupper = isupper; state->akind = 1; /* * Estimate norm of A * (it will be used later in the quadratic penalty function) */ state->absamax = (double)(0); state->absasum = (double)(0); state->absasum2 = (double)(0); t0 = 0; t1 = 0; while(sparseenumerate(a, &t0, &t1, &i, &j, &v, _state)) { if( i==j ) { /* * Diagonal terms are counted only once */ state->absamax = ae_maxreal(state->absamax, v, _state); state->absasum = state->absasum+v; state->absasum2 = state->absasum2+v*v; } if( (j>i&&isupper)||(jabsamax = ae_maxreal(state->absamax, v, _state); state->absasum = state->absasum+2*v; state->absasum2 = state->absasum2+2*v*v; } } } /************************************************************************* This function sets starting point for QP solver. It is useful to have good initial approximation to the solution, because it will increase speed of convergence and identification of active constraints. NOTE: interior point solvers ignore initial point provided by user. INPUT PARAMETERS: State - structure which stores algorithm state X - starting point, array[N]. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetstartingpoint(minqpstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; n = state->n; ae_assert(x->cnt>=n, "MinQPSetStartingPoint: Length(B)n; ae_assert(xorigin->cnt>=n, "MinQPSetOrigin: Length(B)cnt>=state->n, "MinQPSetScale: Length(S)n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinQPSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinQPSetScale: S contains zero elements", _state); } for(i=0; i<=state->n-1; i++) { state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } state->stype = 0; } /************************************************************************* This function sets automatic evaluation of variable scaling. IMPORTANT: this function works only for matrices with positive diagonal elements! Zero or negative elements will result in -9 error code being returned. Specify scale vector manually with minqpsetscale() in such cases. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances) and as preconditioner. The best way to set scaling is to manually specify variable scales. However, sometimes you just need quick-and-dirty solution - either when you perform fast prototyping, or when you know your problem well and you are 100% sure that this quick solution is robust enough in your case. One such solution is to evaluate scale of I-th variable as 1/Sqrt(A[i,i]), where A[i,i] is an I-th diagonal element of the quadratic term. Such approach works well sometimes, but you have to be careful here. 
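For instance (a hedged sketch assuming the C++ wrapper API; the magnitudes below
are made up purely for illustration): if variable 0 varies on a scale of about
1.0e+3 and variable 1 on a scale of about 1.0e-2, either specify the scales
explicitly or, when diag(A) is strictly positive, derive them from the diagonal:

    alglib::real_1d_array s = "[1.0e+3,1.0e-2]";
    alglib::minqpsetscale(state, s);        // explicit scales (preferred)
    // ... or, quick-and-dirty ...
    alglib::minqpsetscaleautodiag(state);   // scale of I-th variable ~ 1/Sqrt(A[i,i])
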
INPUT PARAMETERS:
    State   -   structure stores algorithm state

  -- ALGLIB --
     Copyright 26.12.2017 by Bochkanov Sergey
*************************************************************************/
void minqpsetscaleautodiag(minqpstate* state, ae_state *_state)
{
    state->stype = 1;
}


/*************************************************************************
This function tells solver to use BLEIC-based algorithm and sets stopping
criteria for the algorithm.

This algorithm is intended for large-scale problems, possibly nonconvex,
with small number of general linear constraints. Feasible initial point is
essential for good performance.

IMPORTANT: when DENSE-IPM (or DENSE-AUL for nonconvex problems) solvers
           are applicable, their performance is much better than that of
           BLEIC-QP. We recommend you to use BLEIC only when other solvers
           can not be used.

ALGORITHM FEATURES:
* supports dense and sparse QP problems
* supports box and general linear equality/inequality constraints
* can solve all types of problems (convex, semidefinite, nonconvex) as
  long as they are bounded from below under constraints. Say, it is
  possible to solve "min{-x^2} subject to -1<=x<=+1". Of course, global
  minimum is found only for positive definite and semidefinite problems.
  As for indefinite ones - only local minimum is found.

ALGORITHM OUTLINE:
* BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves
  quadratic programming problem as general linearly constrained
  optimization problem, which is solved by means of BLEIC solver (part of
  ALGLIB, active set method).

ALGORITHM LIMITATIONS:
* This algorithm is inefficient on problems with hundreds and thousands of
  general inequality constraints and infeasible initial point. Initial
  feasibility detection stage may take too long on such constraint sets.
  Consider using DENSE-IPM or DENSE-AUL instead.
* unlike QuickQP solver, this algorithm does not perform Newton steps and
  does not use Level 3 BLAS. Being general-purpose active set method, it
  can activate constraints only one-by-one. Thus, its performance is lower
  than that of QuickQP.
* its precision is also a bit inferior to that of QuickQP. BLEIC-QP
  performs only LBFGS steps (no Newton steps), which are good at detecting
  neighborhood of the solution, but needs many iterations to find solution
  with more than 6 digits of precision.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    EpsG    -   >=0
                The subroutine finishes its work if the condition
                |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
                * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
                * g - gradient
                * s - scaling coefficients set by MinQPSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies following
                condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
    EpsX    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies following
                condition: |v|<=EpsX, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinQPSetScale()
    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                iterations is unlimited.

NOTE: this algorithm uses LBFGS iterations, which are relatively cheap,
      but improve function value only a bit. So you will need many
      iterations to converge - from 0.1*N to 10*N, depending on problem's
      condition number.

IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (presently it is small step length, but it may change in the future versions of ALGLIB). -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetalgobleic(minqpstate* state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinQPSetAlgoBLEIC: EpsG is not finite number", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinQPSetAlgoBLEIC: negative EpsG", _state); ae_assert(ae_isfinite(epsf, _state), "MinQPSetAlgoBLEIC: EpsF is not finite number", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinQPSetAlgoBLEIC: negative EpsF", _state); ae_assert(ae_isfinite(epsx, _state), "MinQPSetAlgoBLEIC: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinQPSetAlgoBLEIC: negative EpsX", _state); ae_assert(maxits>=0, "MinQPSetAlgoBLEIC: negative MaxIts!", _state); state->algokind = 2; if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 ) { epsx = 1.0E-6; } state->qpbleicsettingsuser.epsg = epsg; state->qpbleicsettingsuser.epsf = epsf; state->qpbleicsettingsuser.epsx = epsx; state->qpbleicsettingsuser.maxits = maxits; } /************************************************************************* This function tells QP solver to use DENSE-AUL algorithm and sets stopping criteria for the algorithm. This algorithm is intended for non-convex problems with moderate (up to several thousands) variable count and arbitrary number of constraints which are either (a) effectively convexified under constraints or (b) have unique solution even with nonconvex target. IMPORTANT: when DENSE-IPM solver is applicable, its performance is usually much better than that of DENSE-AUL. We recommend you to use DENSE-AUL only when other solvers can not be used. ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints * convergence is theoretically proved for positive-definite (convex) QP problems. Semidefinite and non-convex problems can be solved as long as they are bounded from below under constraints, although without theoretical guarantees. ALGORITHM OUTLINE: * this algorithm is an augmented Lagrangian method with dense preconditioner (hence its name). * it performs several outer iterations in order to refine values of the Lagrange multipliers. Single outer iteration is a solution of some unconstrained optimization problem: first it performs dense Cholesky factorization of the Hessian in order to build preconditioner (adaptive regularization is applied to enforce positive definiteness), and then it uses L-BFGS optimizer to solve optimization problem. * typically you need about 5-10 outer iterations to converge to solution ALGORITHM LIMITATIONS: * because dense Cholesky driver is used, this algorithm has O(N^2) memory requirements and O(OuterIterations*N^3) minimum running time. From the practical point of view, it limits its applicability by several thousands of variables. From the other side, variables count is the most limiting factor, and dependence on constraint count is much more lower. Assuming that constraint matrix is sparse, it may handle tens of thousands of general linear constraints. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0, stopping criteria for inner optimizer. 
Inner iterations are stopped when step length (with variable scaling being applied) is less than EpsX. See minqpsetscale() for more information on variable scaling. Rho - penalty coefficient, Rho>0: * large enough that algorithm converges with desired precision. * not TOO large to prevent ill-conditioning * recommended values are 100, 1000 or 10000 ItsCnt - number of outer iterations: * recommended values: 10-15 (although in most cases it converges within 5 iterations, you may need a few more to be sure). * ItsCnt=0 means that small number of outer iterations is automatically chosen (10 iterations in current version). * ItsCnt=1 means that AUL algorithm performs just as usual penalty method. * ItsCnt>1 means that AUL algorithm performs specified number of outer iterations IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic step length selection (specific step length chosen may change in the future versions of ALGLIB, so it is better to specify step length explicitly). -- ALGLIB -- Copyright 20.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetalgodenseaul(minqpstate* state, double epsx, double rho, ae_int_t itscnt, ae_state *_state) { ae_assert(ae_isfinite(epsx, _state), "MinQPSetAlgoDenseAUL: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinQPSetAlgoDenseAUL: negative EpsX", _state); ae_assert(ae_isfinite(rho, _state), "MinQPSetAlgoDenseAUL: Rho is not finite number", _state); ae_assert(ae_fp_greater(rho,(double)(0)), "MinQPSetAlgoDenseAUL: non-positive Rho", _state); ae_assert(itscnt>=0, "MinQPSetAlgoDenseAUL: negative ItsCnt!", _state); state->algokind = 4; if( ae_fp_eq(epsx,(double)(0)) ) { epsx = 1.0E-8; } if( itscnt==0 ) { itscnt = 10; } state->qpdenseaulsettingsuser.epsx = epsx; state->qpdenseaulsettingsuser.outerits = itscnt; state->qpdenseaulsettingsuser.rho = rho; } /************************************************************************* This function tells QP solver to use DENSE-IPM QP algorithm and sets stopping criteria for the algorithm. This algorithm is intended for convex and semidefinite problems with moderate (up to several thousands) variable count and arbitrary number of constraints. IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL or BLEIC-QP instead. If you try to run DENSE-IPM on problem with indefinite matrix (matrix having at least one negative eigenvalue) then depending on circumstances it may either (a) stall at some arbitrary point, or (b) throw exception on failure of Cholesky decomposition. ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints ALGORITHM OUTLINE: * this algorithm is an implementation of interior point method as formulated by R.J.Vanderbei, with minor modifications to the algorithm (damped Newton directions are extensively used) * like all interior point methods, this algorithm tends to converge in roughly same number of iterations (between 15 and 30) independently from the problem dimensionality ALGORITHM LIMITATIONS: * because dense Cholesky driver is used, for N-dimensional problem with M dense constaints this algorithm has O(N^2+N*M) memory requirements and O(N^3+N*M^2) running time. 
Having sparse constraints with Z nonzeros per row relaxes storage and running time down to O(N^2+M*Z) and O(N^3+N*Z^2) From the practical point of view, it limits its applicability by several thousands of variables. From the other side, variables count is the most limiting factor, and dependence on constraint count is much more lower. Assuming that constraint matrix is sparse, it may handle tens of thousands of general linear constraints. INPUT PARAMETERS: State - structure which stores algorithm state Eps - >=0, stopping criteria. The algorithm stops when primal and dual infeasiblities as well as complementarity gap are less than Eps. IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. ===== TRACING IPM SOLVER ================================================= IPM solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'IPM' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. * 'IPM.DETAILED'- for output of points being visited and search directions This symbol also implicitly defines 'IPM'. You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. You may specify multiple symbols by separating them with commas: > > alglib::trace_file("IPM.DETAILED,PREC.F6", "path/to/trace.log") > -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetalgodenseipm(minqpstate* state, double eps, ae_state *_state) { ae_assert(ae_isfinite(eps, _state), "MinQPSetAlgoDenseIPM: Eps is not finite number", _state); ae_assert(ae_fp_greater_eq(eps,(double)(0)), "MinQPSetAlgoDenseIPM: negative Eps", _state); state->algokind = 5; state->veps = eps; } /************************************************************************* This function tells QP solver to use SPARSE-IPM QP algorithm and sets stopping criteria for the algorithm. This algorithm is intended for convex and semidefinite problems with large variable and constraint count and sparse quadratic term and constraints. It is possible to have some limited set of dense linear constraints - they will be handled separately by dense BLAS - but the more dense constraints you have, the more time solver needs. IMPORTANT: internally this solver performs large and sparse (N+M)x(N+M) triangular factorization. So it expects both quadratic term and constraints to be highly sparse. However, its running time is influenced by BOTH fill factor and sparsity pattern. Generally we expect that no more than few nonzero elements per row are present. However different sparsity patterns may result in completely different running times even given same fill factor. In many cases this algorithm outperforms DENSE-IPM by order of magnitude. However, in some cases you may get better results with DENSE-IPM even when solving sparse task. 
IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL or BLEIC-QP instead. If you try to run DENSE-IPM on problem with indefinite matrix (matrix having at least one negative eigenvalue) then depending on circumstances it may either (a) stall at some arbitrary point, or (b) throw exception on failure of Cholesky decomposition. ALGORITHM FEATURES: * supports box and dense/sparse general linear equality/inequality constraints * specializes on large-scale sparse problems ALGORITHM OUTLINE: * this algorithm is an implementation of interior point method as formulated by R.J.Vanderbei, with minor modifications to the algorithm (damped Newton directions are extensively used) * like all interior point methods, this algorithm tends to converge in roughly same number of iterations (between 15 and 30) independently from the problem dimensionality ALGORITHM LIMITATIONS: * this algorithm may handle moderate number of dense constraints, usually no more than a thousand of dense ones without losing its efficiency. INPUT PARAMETERS: State - structure which stores algorithm state Eps - >=0, stopping criteria. The algorithm stops when primal and dual infeasiblities as well as complementarity gap are less than Eps. IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetalgosparseipm(minqpstate* state, double eps, ae_state *_state) { ae_assert(ae_isfinite(eps, _state), "MinQPSetAlgoSparseIPM: Eps is not finite number", _state); ae_assert(ae_fp_greater_eq(eps,(double)(0)), "MinQPSetAlgoSparseIPM: negative Eps", _state); state->algokind = 6; state->veps = eps; } /************************************************************************* This function tells solver to use QuickQP algorithm: special extra-fast algorithm for problems with box-only constrants. It may solve non-convex problems as long as they are bounded from below under constraints. ALGORITHM FEATURES: * several times faster than DENSE-IPM when running on box-only problem * utilizes accelerated methods for activation of constraints. * supports dense and sparse QP problems * supports ONLY box constraints; general linear constraints are NOT supported by this solver * can solve all types of problems (convex, semidefinite, nonconvex) as long as they are bounded from below under constraints. Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". In convex/semidefinite case global minimum is returned, in nonconvex case - algorithm returns one of the local minimums. ALGORITHM OUTLINE: * algorithm performs two kinds of iterations: constrained CG iterations and constrained Newton iterations * initially it performs small number of constrained CG iterations, which can efficiently activate/deactivate multiple constraints * after CG phase algorithm tries to calculate Cholesky decomposition and to perform several constrained Newton steps. If Cholesky decomposition failed (matrix is indefinite even under constraints), we perform more CG iterations until we converge to such set of constraints that system matrix becomes positive definite. Constrained Newton steps greatly increase convergence speed and precision. 
* algorithm interleaves CG and Newton iterations which allows it to handle
  indefinite matrices (CG phase) and quickly converge after final set of
  constraints is found (Newton phase). Combination of CG and Newton phases
  is called "outer iteration".
* it is possible to turn off Newton phase (beneficial for semidefinite
  problems - Cholesky decomposition will fail too often)

ALGORITHM LIMITATIONS:
* algorithm does not support general linear constraints; only box ones are
  supported
* Cholesky decomposition for sparse problems is performed with Skyline
  Cholesky solver, which is intended for low-profile matrices. No profile-
  reducing reordering of variables is performed in this version of ALGLIB.
* problems with near-zero negative eigenvalues (or exactly zero ones) may
  experience about 2-3x performance penalty. The reason is that Cholesky
  decomposition can not be performed until we identify directions of zero
  and negative curvature and activate corresponding boundary constraints -
  but we need a lot of trial and error because these directions are hard
  to notice in the matrix spectrum. In this case you may turn off Newton
  phase of algorithm. Large negative eigenvalues are not an issue, so
  highly non-convex problems can be solved very efficiently.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    EpsG    -   >=0
                The subroutine finishes its work if the condition
                |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
                * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
                * g - gradient
                * s - scaling coefficients set by MinQPSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies following
                condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
    EpsX    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies following
                condition: |v|<=EpsX, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinQPSetScale()
    MaxOuterIts-maximum number of OUTER iterations. One outer iteration
                includes some amount of CG iterations (from 5 to ~N) and
                one or several (usually small amount) Newton steps. Thus,
                one outer iteration has high cost, but can greatly reduce
                function value. Use 0 if you do not want to limit number
                of outer iterations.
    UseNewton-  use Newton phase or not:
                * Newton phase improves performance of positive definite
                  dense problems (about 2 times improvement can be observed)
                * can result in some performance penalty on semidefinite or
                  slightly negative definite problems - each Newton phase
                  will bring no improvement (Cholesky failure), but still
                  will require computational time.
                * if you doubt, you can turn off this phase - optimizer
                  will retain most of its high speed.

IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!

Passing EpsG=0, EpsF=0 and EpsX=0 and MaxOuterIts=0 (simultaneously) will
lead to automatic stopping criterion selection (presently it is small step
length, but it may change in the future versions of ALGLIB).
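A short selection sketch (assuming the C++ wrapper; the tolerance below is an
arbitrary choice): for a convex dense problem with box-only constraints,

    alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 1.0e-9, 0, true);  // EpsX-driven stopping, Newton phase on
    alglib::minqpoptimize(state);

and for semidefinite problems the last argument can be set to false to skip the
Newton phase, as discussed above.
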
-- ALGLIB -- Copyright 22.05.2014 by Bochkanov Sergey *************************************************************************/ void minqpsetalgoquickqp(minqpstate* state, double epsg, double epsf, double epsx, ae_int_t maxouterits, ae_bool usenewton, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinQPSetAlgoQuickQP: EpsG is not finite number", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinQPSetAlgoQuickQP: negative EpsG", _state); ae_assert(ae_isfinite(epsf, _state), "MinQPSetAlgoQuickQP: EpsF is not finite number", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinQPSetAlgoQuickQP: negative EpsF", _state); ae_assert(ae_isfinite(epsx, _state), "MinQPSetAlgoQuickQP: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinQPSetAlgoQuickQP: negative EpsX", _state); ae_assert(maxouterits>=0, "MinQPSetAlgoQuickQP: negative MaxOuterIts!", _state); state->algokind = 3; if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxouterits==0 ) { epsx = 1.0E-6; } state->qqpsettingsuser.maxouterits = maxouterits; state->qqpsettingsuser.epsg = epsg; state->qqpsettingsuser.epsf = epsf; state->qqpsettingsuser.epsx = epsx; state->qqpsettingsuser.cnphase = usenewton; } /************************************************************************* This function sets box constraints for QP solver Box constraints are inactive by default (after initial creation). After being set, they are preserved until explicitly overwritten with another minqpsetbc() or minqpsetbcall() call, or partially overwritten with minqpsetbci() call. Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF (latter is recommended because it will allow solver to use better algorithm). BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF (latter is recommended because it will allow solver to use better algorithm). NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: if constraints for all variables are same you may use minqpsetbcall() which allows to specify constraints without using arrays. NOTE: BndL>BndU will result in QP problem being recognized as infeasible. 
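For instance (a hedged C++ wrapper sketch; alglib::fp_posinf and alglib::fp_neginf
are the infinities mentioned in the NOTE above): to constrain x[0] to [0,1], bound
x[1] from below by 2 only, and leave x[2] free:

    alglib::real_1d_array bndl = "[0.0, 2.0, 0.0]";
    alglib::real_1d_array bndu = "[1.0, 0.0, 0.0]";
    bndl[2] = alglib::fp_neginf;            // no lower bound on x[2]
    bndu[1] = alglib::fp_posinf;            // no upper bound on x[1]
    bndu[2] = alglib::fp_posinf;            // x[2] is completely free
    alglib::minqpsetbc(state, bndl, bndu);
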
  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetbc(minqpstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state)
{
    ae_int_t i;
    ae_int_t n;

    n = state->n;
    ae_assert(bndl->cnt>=n, "MinQPSetBC: Length(BndL)<N", _state);
    ae_assert(bndu->cnt>=n, "MinQPSetBC: Length(BndU)<N", _state);
    for(i=0; i<=n-1; i++)
    {
        ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinQPSetBC: BndL contains NAN or +INF", _state);
        ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinQPSetBC: BndU contains NAN or -INF", _state);
        state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i];
        state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state);
        state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i];
        state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state);
    }
}


/*************************************************************************
This function sets box constraints for QP solver (all variables at once,
same constraints for all variables)

Box constraints are inactive by default (after initial creation). After
being set, they are preserved until explicitly overwritten with another
minqpsetbc() or minqpsetbcall() call, or partially overwritten with
minqpsetbci() call.

Following types of constraints are supported:

    DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
    fixed variable      x[i]=Bnd                BndL=BndU
    lower bound         BndL<=x[i]              BndU=+INF
    upper bound         x[i]<=BndU              BndL=-INF
    range               BndL<=x[i]<=BndU        ...
    free variable       -                       BndL=-INF, BndU=+INF

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    BndL    -   lower bound, same for all variables
    BndU    -   upper bound, same for all variables

NOTE: infinite values can be specified by means of Double.PositiveInfinity
      and Double.NegativeInfinity (in C#) and alglib::fp_posinf and
      alglib::fp_neginf (in C++).

NOTE: you may replace infinities by very small/very large values, but it
      is not recommended because large numbers may introduce large
      numerical errors in the algorithm.

NOTE: BndL>BndU will result in QP problem being recognized as infeasible.

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetbcall(minqpstate* state, double bndl, double bndu, ae_state *_state)
{
    ae_int_t i;
    ae_int_t n;

    n = state->n;
    ae_assert(ae_isfinite(bndl, _state)||ae_isneginf(bndl, _state), "MinQPSetBCAll: BndL is NAN or +INF", _state);
    ae_assert(ae_isfinite(bndu, _state)||ae_isposinf(bndu, _state), "MinQPSetBCAll: BndU is NAN or -INF", _state);
    for(i=0; i<=n-1; i++)
    {
        state->bndl.ptr.p_double[i] = bndl;
        state->bndu.ptr.p_double[i] = bndu;
        state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl, _state);
        state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu, _state);
    }
}


/*************************************************************************
This function sets box constraints for I-th variable (other variables are
not modified).

Following types of constraints are supported:

    DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
    fixed variable      x[i]=Bnd                BndL=BndU
    lower bound         BndL<=x[i]              BndU=+INF
    upper bound         x[i]<=BndU              BndL=-INF
    range               BndL<=x[i]<=BndU        ...
    free variable       -                       BndL=-INF, BndU=+INF

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    BndL    -   lower bound
    BndU    -   upper bound

NOTE: infinite values can be specified by means of Double.PositiveInfinity
      and Double.NegativeInfinity (in C#) and alglib::fp_posinf and
      alglib::fp_neginf (in C++).
NOTE: you may replace infinities by very small/very large values, but it
      is not recommended because large numbers may introduce large
      numerical errors in the algorithm.

NOTE: BndL>BndU will result in QP problem being recognized as infeasible.

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetbci(minqpstate* state, ae_int_t i, double bndl, double bndu, ae_state *_state)
{
    ae_assert(i>=0&&i<state->n, "MinQPSetBCi: I is outside of [0,N)", _state);
    ae_assert(ae_isfinite(bndl, _state)||ae_isneginf(bndl, _state), "MinQPSetBCi: BndL is NAN or +INF", _state);
    ae_assert(ae_isfinite(bndu, _state)||ae_isposinf(bndu, _state), "MinQPSetBCi: BndU is NAN or -INF", _state);
    state->bndl.ptr.p_double[i] = bndl;
    state->bndu.ptr.p_double[i] = bndu;
    state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl, _state);
    state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu, _state);
}


/*************************************************************************
This function sets dense linear constraints for QP optimizer.

This function overrides results of previous calls to minqpsetlc(),
minqpsetlcsparse() and minqpsetlcmixed(). After call to this function all
non-box constraints are dropped, and you have only those constraints which
were specified in the present call.

If you want to specify mixed (with dense and sparse terms) linear
constraints, you should call minqpsetlcmixed().

INPUT PARAMETERS:
    State   -   structure previously allocated with MinQPCreate call.
    C       -   linear constraints, array[K,N+1]. Each row of C represents
                one constraint, either equality or inequality (see below):
                * first N elements correspond to coefficients,
                * last element corresponds to the right part.
                All elements of C (including right part) must be finite.
    CT      -   type of constraints, array[K]:
                * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
                * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
                * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
    K       -   number of equality/inequality constraints, K>=0:
                * if given, only leading K elements of C/CT are used
                * if not given, automatically determined from sizes of C/CT

NOTE 1: linear (non-bound) constraints are satisfied only approximately -
        there always exists some violation due to numerical errors and
        algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
        solver is less precise).

  -- ALGLIB --
     Copyright 19.06.2012 by Bochkanov Sergey
*************************************************************************/
void minqpsetlc(minqpstate* state,
     /* Real    */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state)
{
    ae_frame _frame_block;
    sparsematrix dummyc;
    ae_vector dummyct;

    ae_frame_make(_state, &_frame_block);
    memset(&dummyc, 0, sizeof(dummyc));
    memset(&dummyct, 0, sizeof(dummyct));
    _sparsematrix_init(&dummyc, _state, ae_true);
    ae_vector_init(&dummyct, 0, DT_INT, _state, ae_true);
    minqpsetlcmixed(state, &dummyc, &dummyct, 0, c, ct, k, _state);
    ae_frame_leave(_state);
}


/*************************************************************************
This function sets sparse linear constraints for QP optimizer.

This function overrides results of previous calls to minqpsetlc(),
minqpsetlcsparse() and minqpsetlcmixed(). After call to this function all
non-box constraints are dropped, and you have only those constraints which
were specified in the present call.

If you want to specify mixed (with dense and sparse terms) linear
constraints, you should call minqpsetlcmixed().
INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. C - linear constraints, sparse matrix with dimensions at least [K,N+1]. If matrix has larger size, only leading Kx(N+1) rectangle is used. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0 NOTE 1: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). -- ALGLIB -- Copyright 22.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetlcsparse(minqpstate* state, sparsematrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_frame _frame_block; ae_matrix dummyc; ae_vector dummyct; ae_frame_make(_state, &_frame_block); memset(&dummyc, 0, sizeof(dummyc)); memset(&dummyct, 0, sizeof(dummyct)); ae_matrix_init(&dummyc, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&dummyct, 0, DT_INT, _state, ae_true); minqpsetlcmixed(state, c, ct, k, &dummyc, &dummyct, 0, _state); ae_frame_leave(_state); } /************************************************************************* This function sets mixed linear constraints, which include a set of dense rows, and a set of sparse rows. This function overrides results of previous calls to minqpsetlc(), minqpsetlcsparse() and minqpsetlcmixed(). This function may be useful if constraint matrix includes large number of both types of rows - dense and sparse. If you have just a few sparse rows, you may represent them in dense format without loosing performance. Similarly, if you have just a few dense rows, you may store them in sparse format with almost same performance. INPUT PARAMETERS: State - structure previously allocated with MinQPCreate call. SparseC - linear constraints, sparse matrix with dimensions EXACTLY EQUAL TO [SparseK,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. SparseCT- type of sparse constraints, array[K]: * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1] * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x = SparseC[i,n+1] * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1] SparseK - number of sparse equality/inequality constraints, K>=0 DenseC - dense linear constraints, array[K,N+1]. Each row of DenseC represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of DenseC (including right part) must be finite. 
DenseCT - type of constraints, array[K]: * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1] * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x = DenseC[i,n+1] * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1] DenseK - number of equality/inequality constraints, DenseK>=0 NOTE 1: linear (non-box) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP solver is less precise). NOTE 2: due to backward compatibility reasons SparseC can be larger than [SparseK,N+1]. In this case only leading [SparseK,N+1] submatrix will be used. However, the rest of ALGLIB has more strict requirements on the input size, so we recommend you to pass sparse term whose size exactly matches algorithm expectations. -- ALGLIB -- Copyright 22.08.2016 by Bochkanov Sergey *************************************************************************/ void minqpsetlcmixed(minqpstate* state, sparsematrix* sparsec, /* Integer */ ae_vector* sparsect, ae_int_t sparsek, /* Real */ ae_matrix* densec, /* Integer */ ae_vector* densect, ae_int_t densek, ae_state *_state) { ae_frame _frame_block; ae_int_t n; ae_int_t i; ae_int_t j; ae_int_t j0; double v; ae_vector srcidx; ae_vector dstidx; ae_vector s; ae_vector rs; ae_vector eoffs; ae_vector roffs; ae_vector v2; ae_vector eidx; ae_vector eval; ae_int_t t0; ae_int_t t1; ae_int_t nnz; ae_frame_make(_state, &_frame_block); memset(&srcidx, 0, sizeof(srcidx)); memset(&dstidx, 0, sizeof(dstidx)); memset(&s, 0, sizeof(s)); memset(&rs, 0, sizeof(rs)); memset(&eoffs, 0, sizeof(eoffs)); memset(&roffs, 0, sizeof(roffs)); memset(&v2, 0, sizeof(v2)); memset(&eidx, 0, sizeof(eidx)); memset(&eval, 0, sizeof(eval)); ae_vector_init(&srcidx, 0, DT_INT, _state, ae_true); ae_vector_init(&dstidx, 0, DT_INT, _state, ae_true); ae_vector_init(&s, 0, DT_REAL, _state, ae_true); ae_vector_init(&rs, 0, DT_INT, _state, ae_true); ae_vector_init(&eoffs, 0, DT_INT, _state, ae_true); ae_vector_init(&roffs, 0, DT_INT, _state, ae_true); ae_vector_init(&v2, 0, DT_REAL, _state, ae_true); ae_vector_init(&eidx, 0, DT_INT, _state, ae_true); ae_vector_init(&eval, 0, DT_REAL, _state, ae_true); n = state->n; /* * First, check for errors in the inputs */ ae_assert(densek>=0, "MinQPSetLCMixed: K<0", _state); ae_assert(densek==0||densec->cols>=n+1, "MinQPSetLCMixed: Cols(C)rows>=densek, "MinQPSetLCMixed: Rows(DenseC)cnt>=densek, "MinQPSetLCMixed: Length(DenseCT)=0, "MinQPSetLCMixed: SparseK<0", _state); ae_assert(sparsek==0||sparsegetncols(sparsec, _state)>=n+1, "MinQPSetLCMixed: Cols(SparseC)=sparsek, "MinQPSetLCMixed: Rows(SparseC)cnt>=sparsek, "MinQPSetLCMixed: Length(SparseCT)replaglc, densek+sparsek, _state); for(i=0; i<=densek+sparsek-1; i++) { state->replaglc.ptr.p_double[i] = 0.0; } /* * Init */ ae_vector_set_length(&state->cl, densek+sparsek, _state); ae_vector_set_length(&state->cu, densek+sparsek, _state); state->mdense = densek; state->msparse = sparsek; if( sparsek>0 ) { /* * Evaluate row sizes for new storage */ ae_vector_set_length(&rs, sparsek, _state); for(i=0; i<=sparsek-1; i++) { rs.ptr.p_int[i] = 0; } t0 = 0; t1 = 0; nnz = 0; while(sparseenumerate(sparsec, &t0, &t1, &i, &j, &v, _state)) { if( i>sparsek-1||j>n-1 ) { continue; } ae_assert(ae_isfinite(v, _state), "MinQPSetLCSparse: C contains infinite or NAN values", _state); nnz = nnz+1; rs.ptr.p_int[i] = rs.ptr.p_int[i]+1; } /* * Prepare new sparse CRS storage, copy leading SparseK*N submatrix 
into the storage */ for(i=0; i<=sparsek-1; i++) { state->cl.ptr.p_double[i] = (double)(0); state->cu.ptr.p_double[i] = (double)(0); } state->sparsec.m = sparsek; state->sparsec.n = n; ivectorsetlengthatleast(&state->sparsec.ridx, sparsek+1, _state); ivectorsetlengthatleast(&state->sparsec.idx, nnz, _state); rvectorsetlengthatleast(&state->sparsec.vals, nnz, _state); ae_vector_set_length(&eoffs, sparsek+1, _state); state->sparsec.ridx.ptr.p_int[0] = 0; eoffs.ptr.p_int[0] = 0; for(i=1; i<=sparsek; i++) { state->sparsec.ridx.ptr.p_int[i] = state->sparsec.ridx.ptr.p_int[i-1]+rs.ptr.p_int[i-1]; eoffs.ptr.p_int[i] = state->sparsec.ridx.ptr.p_int[i]; } t0 = 0; t1 = 0; while(sparseenumerate(sparsec, &t0, &t1, &i, &j, &v, _state)) { if( i>sparsek-1||j>n ) { continue; } if( jsparsec.idx.ptr.p_int[j0] = j; state->sparsec.vals.ptr.p_double[j0] = v; eoffs.ptr.p_int[i] = j0+1; } else { /* * Handle right part of the constraint */ state->cl.ptr.p_double[i] = v; state->cu.ptr.p_double[i] = v; } } for(i=0; i<=sparsek-1; i++) { ae_assert(eoffs.ptr.p_int[i]==state->sparsec.ridx.ptr.p_int[i+1], "MinQP: critical integrity check failed (sparse copying)", _state); } sparsecreatecrsinplace(&state->sparsec, _state); for(i=0; i<=sparsek-1; i++) { if( sparsect->ptr.p_int[i]>0 ) { state->cu.ptr.p_double[i] = _state->v_posinf; } if( sparsect->ptr.p_int[i]<0 ) { state->cl.ptr.p_double[i] = _state->v_neginf; } } } if( densek>0 ) { /* * Copy dense constraints */ rmatrixsetlengthatleast(&state->densec, densek, n, _state); for(i=0; i<=densek-1; i++) { for(j=0; j<=n-1; j++) { state->densec.ptr.pp_double[i][j] = densec->ptr.pp_double[i][j]; } if( densect->ptr.p_int[i]>0 ) { state->cl.ptr.p_double[sparsek+i] = densec->ptr.pp_double[i][n]; state->cu.ptr.p_double[sparsek+i] = _state->v_posinf; continue; } if( densect->ptr.p_int[i]<0 ) { state->cl.ptr.p_double[sparsek+i] = _state->v_neginf; state->cu.ptr.p_double[sparsek+i] = densec->ptr.pp_double[i][n]; continue; } state->cl.ptr.p_double[sparsek+i] = densec->ptr.pp_double[i][n]; state->cu.ptr.p_double[sparsek+i] = densec->ptr.pp_double[i][n]; } } ae_frame_leave(_state); } /************************************************************************* This function provides legacy API for specification of mixed dense/sparse linear constraints. New conventions used by ALGLIB since release 3.16.0 state that set of sparse constraints comes first, followed by set of dense ones. This convention is essential when you talk about things like order of Lagrange multipliers. However, legacy API accepted mixed constraints in reverse order. This function is here to simplify situation with code relying on legacy API. It simply accepts constraints in one order (old) and passes them to new API, now in correct order. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlcmixedlegacy(minqpstate* state, /* Real */ ae_matrix* densec, /* Integer */ ae_vector* densect, ae_int_t densek, sparsematrix* sparsec, /* Integer */ ae_vector* sparsect, ae_int_t sparsek, ae_state *_state) { minqpsetlcmixed(state, sparsec, sparsect, sparsek, densec, densect, densek, _state); } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with dense constraint matrix A. NOTE: knowing that constraint matrix is dense helps some QP solvers (especially modern IPM method) to utilize efficient dense Level 3 BLAS for dense parts of the problem. 
If your problem has both dense and sparse constraints, you can use minqpsetlc2mixed() function, which will result in dense algebra being applied to dense terms, and sparse sparse linear algebra applied to sparse terms. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - linear constraints, array[K,N]. Each row of A represents one constraint. One-sided inequality constraints, two- sided inequality constraints, equality constraints are supported (see below) AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0; if not given, inferred from sizes of A, AL, AU. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2dense(minqpstate* state, /* Real */ ae_matrix* a, /* Real */ ae_vector* al, /* Real */ ae_vector* au, ae_int_t k, ae_state *_state) { minqpsetlc2mixed(state, &state->dummysparse, 0, a, k, al, au, _state); } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with sparse constraining matrix A. Recommended for large-scale problems. This function overwrites linear (non-box) constraints set by previous calls (if such calls were made). INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - sparse matrix with size [K,N] (exactly!). Each row of A represents one general linear constraint. A can be stored in any sparse storage format. AL, AU - lower and upper bounds, array[K]; * AL[i]=AU[i] => equality constraint Ai*x * AL[i] two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0. If K=0 is specified, A, AL, AU are ignored. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2(minqpstate* state, sparsematrix* a, /* Real */ ae_vector* al, /* Real */ ae_vector* au, ae_int_t k, ae_state *_state) { minqpsetlc2mixed(state, a, k, &state->dummyr2, 0, al, au, _state); } /************************************************************************* This function sets two-sided linear constraints AL <= A*x <= AU with mixed constraining matrix A including sparse part (first SparseK rows) and dense part (last DenseK rows). Recommended for large-scale problems. This function overwrites linear (non-box) constraints set by previous calls (if such calls were made). This function may be useful if constraint matrix includes large number of both types of rows - dense and sparse. If you have just a few sparse rows, you may represent them in dense format without loosing performance. Similarly, if you have just a few dense rows, you may store them in sparse format with almost same performance. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. SparseA - sparse matrix with size [K,N] (exactly!). Each row of A represents one general linear constraint. A can be stored in any sparse storage format. 
SparseK - number of sparse constraints, SparseK>=0 DenseA - linear constraints, array[K,N], set of dense constraints. Each row of A represents one general linear constraint. DenseK - number of dense constraints, DenseK>=0 AL, AU - lower and upper bounds, array[SparseK+DenseK], with former SparseK elements corresponding to sparse constraints, and latter DenseK elements corresponding to dense constraints; * AL[i]=AU[i] => equality constraint Ai*x=AL[i] * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x * AL[i]=-INF, AU[i]=+INF => constraint is ignored K - number of equality/inequality constraints, K>=0. If K=0 is specified, A, AL, AU are ignored. -- ALGLIB -- Copyright 01.11.2019 by Bochkanov Sergey *************************************************************************/ void minqpsetlc2mixed(minqpstate* state, sparsematrix* sparsea, ae_int_t ksparse, /* Real */ ae_matrix* densea, ae_int_t kdense, /* Real */ ae_vector* al, /* Real */ ae_vector* au, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; n = state->n; m = kdense+ksparse; /* * Check input arguments */ ae_assert(ksparse>=0, "MinQPSetLC2Mixed: KSparse<0", _state); ae_assert(ksparse==0||sparsegetncols(sparsea, _state)==n, "MinQPSetLC2: Cols(SparseA)<>N", _state); ae_assert(ksparse==0||sparsegetnrows(sparsea, _state)==ksparse, "MinQPSetLC2: Rows(SparseA)<>K", _state); ae_assert(kdense>=0, "MinQPSetLC2Mixed: KDense<0", _state); ae_assert(kdense==0||densea->cols>=n, "MinQPSetLC2Mixed: Cols(DenseA)<N", _state); ae_assert(kdense==0||densea->rows>=kdense, "MinQPSetLC2Mixed: Rows(DenseA)<KDense", _state); ae_assert(al->cnt>=kdense+ksparse, "MinQPSetLC2Mixed: Length(AL)<K", _state); ae_assert(au->cnt>=kdense+ksparse, "MinQPSetLC2Mixed: Length(AU)<K", _state); for(i=0; i<=kdense+ksparse-1; i++) { ae_assert(ae_isfinite(al->ptr.p_double[i], _state)||ae_isneginf(al->ptr.p_double[i], _state), "MinQPSetLC2Mixed: AL contains NAN or +INF", _state); ae_assert(ae_isfinite(au->ptr.p_double[i], _state)||ae_isposinf(au->ptr.p_double[i], _state), "MinQPSetLC2Mixed: AU contains NAN or -INF", _state); } /* * Allocate place for Lagrange multipliers, fill by zero */ rvectorsetlengthatleast(&state->replaglc, kdense+ksparse, _state); for(i=0; i<=kdense+ksparse-1; i++) { state->replaglc.ptr.p_double[i] = 0.0; } /* * Quick exit if needed */ if( m==0 ) { state->mdense = 0; state->msparse = 0; return; } /* * Prepare */ rvectorsetlengthatleast(&state->cl, m, _state); rvectorsetlengthatleast(&state->cu, m, _state); for(i=0; i<=m-1; i++) { state->cl.ptr.p_double[i] = al->ptr.p_double[i]; state->cu.ptr.p_double[i] = au->ptr.p_double[i]; } state->mdense = kdense; state->msparse = ksparse; /* * Copy dense and sparse terms */ if( ksparse>0 ) { sparsecopytocrsbuf(sparsea, &state->sparsec, _state); } if( kdense>0 ) { rmatrixsetlengthatleast(&state->densec, kdense, n, _state); rmatrixcopy(kdense, n, densea, 0, 0, &state->densec, 0, 0, _state); } } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present dense constraints. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. A - linear constraint coefficient, array[N], right side is NOT included. 
AL, AU - lower and upper bounds; * AL=AU => equality constraint A*x=AL * AL<AU => two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint A*x<=AU * AU=+INF => one-sided constraint AL<=A*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minqpaddlc2dense(minqpstate* state, /* Real */ ae_vector* a, double al, double au, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; ae_assert(a->cnt>=n, "MinQPAddLC2Dense: Length(A)<N", _state); rvectorgrowto(&state->cl, state->msparse+state->mdense+1, _state); rvectorgrowto(&state->cu, state->msparse+state->mdense+1, _state); rvectorgrowto(&state->replaglc, state->msparse+state->mdense+1, _state); rmatrixgrowrowsto(&state->densec, state->mdense+1, n, _state); for(i=0; i<=n-1; i++) { state->densec.ptr.pp_double[state->mdense][i] = a->ptr.p_double[i]; } state->cl.ptr.p_double[state->msparse+state->mdense] = al; state->cu.ptr.p_double[state->msparse+state->mdense] = au; state->replaglc.ptr.p_double[state->msparse+state->mdense] = 0.0; inc(&state->mdense, _state); } /************************************************************************* This function appends two-sided linear constraint AL <= A*x <= AU to the list of currently present sparse constraints. Constraint is passed in compressed format: as a list of non-zero entries of the coefficient vector A. Such an approach is more efficient than dense storage for highly sparse constraint vectors. INPUT PARAMETERS: State - structure previously allocated with minqpcreate() call. IdxA - array[NNZ], indexes of non-zero elements of A: * can be unsorted * can include duplicate indexes (corresponding entries of ValA[] will be summed) ValA - array[NNZ], values of non-zero elements of A NNZ - number of non-zero coefficients in A AL, AU - lower and upper bounds; * AL=AU => equality constraint A*x=AL * AL<AU => two-sided constraint AL<=A*x<=AU * AL=-INF => one-sided constraint A*x<=AU * AU=+INF => one-sided constraint AL<=A*x * AL=-INF, AU=+INF => constraint is ignored -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minqpaddlc2(minqpstate* state, /* Integer */ ae_vector* idxa, /* Real */ ae_vector* vala, ae_int_t nnz, double al, double au, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t offs; ae_int_t offsdst; ae_int_t n; ae_int_t didx; ae_int_t uidx; n = state->n; /* * Check inputs */ ae_assert(nnz>=0, "MinQPAddLC2: NNZ<0", _state); ae_assert(idxa->cnt>=nnz, "MinQPAddLC2: Length(IdxA)<NNZ", _state); ae_assert(vala->cnt>=nnz, "MinQPAddLC2: Length(ValA)<NNZ", _state); for(i=0; i<=nnz-1; i++) { ae_assert(idxa->ptr.p_int[i]>=0&&idxa->ptr.p_int[i]<n, "MinQPAddLC2: IdxA contains indexes outside of [0,N) range", _state); } /* * If no sparse constraints were added yet, initialize empty CRS storage for them */ if( state->msparse==0 ) { state->sparsec.matrixtype = 1; state->sparsec.m = 0; state->sparsec.n = n; state->sparsec.ninitialized = 0; ivectorsetlengthatleast(&state->sparsec.ridx, 1, _state); state->sparsec.ridx.ptr.p_int[0] = 0; } ae_assert(state->sparsec.matrixtype==1&&state->sparsec.m==state->msparse, "MinQPAddLC2: integrity check failed!", _state); /* * Reallocate inequality bounds */ rvectorgrowto(&state->cl, state->msparse+state->mdense+1, _state); rvectorgrowto(&state->cu, state->msparse+state->mdense+1, _state); rvectorgrowto(&state->replaglc, state->msparse+state->mdense+1, _state); for(i=state->msparse+state->mdense; i>=state->msparse+1; i--) { state->cl.ptr.p_double[i] = state->cl.ptr.p_double[i-1]; state->cu.ptr.p_double[i] = state->cu.ptr.p_double[i-1]; state->replaglc.ptr.p_double[i] = state->replaglc.ptr.p_double[i-1]; } state->cl.ptr.p_double[state->msparse] = al; 
state->cu.ptr.p_double[state->msparse] = au; state->replaglc.ptr.p_double[state->msparse] = 0.0; /* * Reallocate sparse storage */ offs = state->sparsec.ridx.ptr.p_int[state->msparse]; ivectorgrowto(&state->sparsec.idx, offs+nnz, _state); rvectorgrowto(&state->sparsec.vals, offs+nnz, _state); ivectorgrowto(&state->sparsec.didx, state->msparse+1, _state); ivectorgrowto(&state->sparsec.uidx, state->msparse+1, _state); ivectorgrowto(&state->sparsec.ridx, state->msparse+2, _state); /* * If NNZ=0, perform quick and simple row append. */ if( nnz==0 ) { state->sparsec.didx.ptr.p_int[state->msparse] = state->sparsec.ridx.ptr.p_int[state->msparse]; state->sparsec.uidx.ptr.p_int[state->msparse] = state->sparsec.ridx.ptr.p_int[state->msparse]; state->sparsec.ridx.ptr.p_int[state->msparse+1] = state->sparsec.ridx.ptr.p_int[state->msparse]; inc(&state->sparsec.m, _state); inc(&state->msparse, _state); return; } /* * Now we are sure that SparseC contains properly initialized sparse * matrix (or some appropriate dummy for M=0) and we have NNZ>0 * (no need to care about degenerate cases). * * Append rows to SparseC: * * append data * * sort in place * * merge duplicate indexes * * compute DIdx and UIdx * */ for(i=0; i<=nnz-1; i++) { state->sparsec.idx.ptr.p_int[offs+i] = idxa->ptr.p_int[i]; state->sparsec.vals.ptr.p_double[offs+i] = vala->ptr.p_double[i]; } tagsortmiddleir(&state->sparsec.idx, &state->sparsec.vals, offs, nnz, _state); offsdst = offs; for(i=1; i<=nnz-1; i++) { if( state->sparsec.idx.ptr.p_int[offsdst]!=state->sparsec.idx.ptr.p_int[offs+i] ) { offsdst = offsdst+1; state->sparsec.idx.ptr.p_int[offsdst] = state->sparsec.idx.ptr.p_int[offs+i]; state->sparsec.vals.ptr.p_double[offsdst] = state->sparsec.vals.ptr.p_double[offs+i]; } else { state->sparsec.vals.ptr.p_double[offsdst] = state->sparsec.vals.ptr.p_double[offsdst]+state->sparsec.vals.ptr.p_double[offs+i]; } } nnz = offsdst-offs+1; uidx = -1; didx = -1; for(j=offs; j<=offsdst; j++) { k = state->sparsec.idx.ptr.p_int[j]; if( k==state->msparse ) { didx = j; } else { if( k>state->msparse&&uidx==-1 ) { uidx = j; break; } } } if( uidx==-1 ) { uidx = offsdst+1; } if( didx==-1 ) { didx = uidx; } state->sparsec.didx.ptr.p_int[state->msparse] = didx; state->sparsec.uidx.ptr.p_int[state->msparse] = uidx; state->sparsec.ridx.ptr.p_int[state->msparse+1] = offsdst+1; state->sparsec.ninitialized = state->sparsec.ridx.ptr.p_int[state->msparse+1]; inc(&state->sparsec.m, _state); inc(&state->msparse, _state); } /************************************************************************* This function solves quadratic programming problem. Prior to calling this function you should choose solver by means of one of the following functions: * minqpsetalgoquickqp() - for QuickQP solver * minqpsetalgobleic() - for BLEIC-QP solver * minqpsetalgodenseaul() - for Dense-AUL-QP solver * minqpsetalgodenseipm() - for Dense-IPM-QP solver These functions also allow you to control stopping criteria of the solver. If you did not set solver, MinQP subpackage will automatically select solver for your problem and will run it with default stopping criteria. However, it is better to set explicitly solver and its stopping criteria. INPUT PARAMETERS: State - algorithm state You should use MinQPResults() function to access results after calls to this function. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey. Special thanks to Elvira Illarionova for important suggestions on the linearly constrained QP algorithm. 
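A typical call sequence, given here only as an informal sketch (the exact signatures of minqpcreate() and of the minqpsetalgo...() solver selectors are assumed from their own comments and should be checked there; minqpoptimize() and minqpresults() are defined in this unit):

    minqpcreate(n, &state, _state);                    (assumed signature)
    ... set quadratic/linear terms, scale, box and linear constraints ...
    minqpsetalgodenseipm(...);                         (or another minqpsetalgo...() call)
    minqpoptimize(&state, _state);
    minqpresults(&state, &x, &rep, _state);            (rep.terminationtype>0 on success)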
*************************************************************************/ void minqpoptimize(minqpstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; ae_int_t nbc; ae_int_t neq; ae_int_t nineq; ae_int_t curecpos; ae_int_t curicpos; n = state->n; m = state->mdense+state->msparse; state->repterminationtype = -5; state->repinneriterationscount = 0; state->repouteriterationscount = 0; state->repncholesky = 0; state->repnmv = 0; /* * Zero-fill Lagrange multipliers (their initial value) */ for(i=0; i<=n-1; i++) { state->replagbc.ptr.p_double[i] = 0.0; } for(i=0; i<=m-1; i++) { state->replaglc.ptr.p_double[i] = 0.0; } /* * Initial point: * * if we have starting point in StartX, we just have to bound it * * if we do not have StartX, deduce initial point from boundary constraints */ if( state->havex ) { for(i=0; i<=n-1; i++) { state->xs.ptr.p_double[i] = state->startx.ptr.p_double[i]; if( state->havebndl.ptr.p_bool[i]&&ae_fp_less(state->xs.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->xs.ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->havebndu.ptr.p_bool[i]&&ae_fp_greater(state->xs.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->xs.ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } } else { for(i=0; i<=n-1; i++) { if( state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i] ) { state->xs.ptr.p_double[i] = 0.5*(state->bndl.ptr.p_double[i]+state->bndu.ptr.p_double[i]); continue; } if( state->havebndl.ptr.p_bool[i] ) { state->xs.ptr.p_double[i] = state->bndl.ptr.p_double[i]; continue; } if( state->havebndu.ptr.p_bool[i] ) { state->xs.ptr.p_double[i] = state->bndu.ptr.p_double[i]; continue; } state->xs.ptr.p_double[i] = (double)(0); } } /* * check correctness of constraints */ for(i=0; i<=n-1; i++) { if( state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i] ) { if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->repterminationtype = -3; return; } } } /* * count number of bound and linear constraints */ nbc = 0; for(i=0; i<=n-1; i++) { if( state->havebndl.ptr.p_bool[i] ) { nbc = nbc+1; } if( state->havebndu.ptr.p_bool[i] ) { nbc = nbc+1; } } /* * Effective scale */ rvectorsetlengthatleast(&state->effectives, n, _state); if( state->stype==0 ) { /* * User scale (or default one) */ for(i=0; i<=n-1; i++) { state->effectives.ptr.p_double[i] = state->s.ptr.p_double[i]; } } else { if( state->stype==1 ) { /* * Diagonal is used for scaling: * * unpack * * convert to scale, return error on failure */ if( state->akind==0 ) { /* * Unpack CQM structure */ cqmgetdiaga(&state->a, &state->effectives, _state); } else { if( state->akind==1 ) { for(i=0; i<=n-1; i++) { state->effectives.ptr.p_double[i] = sparseget(&state->sparsea, i, i, _state); } } else { ae_assert(ae_false, "MinQPOptimize: integrity check failed", _state); } } for(i=0; i<=n-1; i++) { if( ae_fp_less_eq(state->effectives.ptr.p_double[i],(double)(0)) ) { state->repterminationtype = -9; return; } state->effectives.ptr.p_double[i] = 1/ae_sqrt(state->effectives.ptr.p_double[i], _state); } } else { ae_assert(ae_false, "MinQPOptimize: integrity check failed", _state); } } /* * Solvers which can not handle new two-sided constraints need them to be * converted into legacy equality/inequality one-sided format */ if( state->algokind==2||state->algokind==4 ) { /* * Scan constraint left/right sides, count equality ones and one/two-sided inequality ones */ neq = 0; nineq = 0; for(i=0; i<=m-1; i++) { if( 
(ae_isfinite(state->cl.ptr.p_double[i], _state)&&ae_isfinite(state->cu.ptr.p_double[i], _state))&&ae_fp_eq(state->cl.ptr.p_double[i],state->cu.ptr.p_double[i]) ) { inc(&neq, _state); continue; } if( ae_isfinite(state->cl.ptr.p_double[i], _state) ) { inc(&nineq, _state); } if( ae_isfinite(state->cu.ptr.p_double[i], _state) ) { inc(&nineq, _state); } } /* * Perform conversion */ rmatrixsetlengthatleast(&state->ecleic, neq+nineq, n+1, _state); rvectorsetlengthatleast(&state->elagmlt, neq+nineq, _state); ivectorsetlengthatleast(&state->elagidx, neq+nineq, _state); curecpos = 0; curicpos = neq; for(i=0; i<=m-1; i++) { if( (ae_isfinite(state->cl.ptr.p_double[i], _state)&&ae_isfinite(state->cu.ptr.p_double[i], _state))&&ae_fp_eq(state->cl.ptr.p_double[i],state->cu.ptr.p_double[i]) ) { /* * Offload equality constraint */ if( i<state->msparse ) { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curecpos][j] = (double)(0); } j0 = state->sparsec.ridx.ptr.p_int[i]; j1 = state->sparsec.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->ecleic.ptr.pp_double[curecpos][state->sparsec.idx.ptr.p_int[j]] = state->sparsec.vals.ptr.p_double[j]; } } else { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curecpos][j] = state->densec.ptr.pp_double[i-state->msparse][j]; } } state->ecleic.ptr.pp_double[curecpos][n] = state->cu.ptr.p_double[i]; state->elagidx.ptr.p_int[curecpos] = i; state->elagmlt.ptr.p_double[curecpos] = 1.0; inc(&curecpos, _state); continue; } if( ae_isfinite(state->cl.ptr.p_double[i], _state) ) { /* * Offload inequality constraint of the form CL<=C*x, convert it to -C*x<=-CL */ if( i<state->msparse ) { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curicpos][j] = (double)(0); } j0 = state->sparsec.ridx.ptr.p_int[i]; j1 = state->sparsec.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->ecleic.ptr.pp_double[curicpos][state->sparsec.idx.ptr.p_int[j]] = -state->sparsec.vals.ptr.p_double[j]; } } else { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curicpos][j] = -state->densec.ptr.pp_double[i-state->msparse][j]; } } state->ecleic.ptr.pp_double[curicpos][n] = -state->cl.ptr.p_double[i]; state->elagidx.ptr.p_int[curicpos] = i; state->elagmlt.ptr.p_double[curicpos] = -1.0; inc(&curicpos, _state); } if( ae_isfinite(state->cu.ptr.p_double[i], _state) ) { /* * Offload inequality constraint of the form C*x<=CU */ if( i<state->msparse ) { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curicpos][j] = (double)(0); } j0 = state->sparsec.ridx.ptr.p_int[i]; j1 = state->sparsec.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->ecleic.ptr.pp_double[curicpos][state->sparsec.idx.ptr.p_int[j]] = state->sparsec.vals.ptr.p_double[j]; } } else { for(j=0; j<=n-1; j++) { state->ecleic.ptr.pp_double[curicpos][j] = state->densec.ptr.pp_double[i-state->msparse][j]; } } state->ecleic.ptr.pp_double[curicpos][n] = state->cu.ptr.p_double[i]; state->elagidx.ptr.p_int[curicpos] = i; state->elagmlt.ptr.p_double[curicpos] = 1.0; inc(&curicpos, _state); } } ae_assert(curecpos==neq&&curicpos==neq+nineq, "MinQPOptimize: critical integrity check failed (ECLEIC conversion)", _state); /* * Run solvers */ if( state->algokind==2 ) { qpbleicoptimize(&state->a, &state->sparsea, state->akind, state->sparseaupper, state->absasum, state->absasum2, &state->b, &state->bndl, &state->bndu, &state->effectives, &state->xorigin, n, &state->ecleic, neq, nineq, &state->qpbleicsettingsuser, &state->qpbleicbuf, &state->qpbleicfirstcall, &state->xs, &state->repterminationtype, _state); state->repinneriterationscount = 
state->qpbleicbuf.repinneriterationscount; state->repouteriterationscount = state->qpbleicbuf.repouteriterationscount; return; } if( state->algokind==4 ) { qpdenseauloptimize(&state->a, &state->sparsea, state->akind, state->sparseaupper, &state->b, &state->bndl, &state->bndu, &state->effectives, &state->xorigin, n, &state->ecleic, neq, nineq, &state->dummysparse, 0, 0, !state->dbgskipconstraintnormalization, &state->qpdenseaulsettingsuser, &state->qpdenseaulbuf, &state->xs, &state->replagbc, &state->elaglc, &state->repterminationtype, _state); for(i=0; i<=neq+nineq-1; i++) { state->replaglc.ptr.p_double[state->elagidx.ptr.p_int[i]] = state->replaglc.ptr.p_double[state->elagidx.ptr.p_int[i]]+state->elaglc.ptr.p_double[i]*state->elagmlt.ptr.p_double[i]; } state->repinneriterationscount = state->qpdenseaulbuf.repinneriterationscount; state->repouteriterationscount = state->qpdenseaulbuf.repouteriterationscount; state->repncholesky = state->qpdenseaulbuf.repncholesky; return; } ae_assert(ae_false, "MinQPOptimize: integrity check failed - unknown solver", _state); } /* * QuickQP solver */ if( state->algokind==3 ) { if( state->mdense+state->msparse>0 ) { state->repterminationtype = -5; return; } qqpoptimize(&state->a, &state->sparsea, &state->dummyr2, state->akind, state->sparseaupper, &state->b, &state->bndl, &state->bndu, &state->effectives, &state->xorigin, n, &state->qqpsettingsuser, &state->qqpbuf, &state->xs, &state->repterminationtype, _state); state->repinneriterationscount = state->qqpbuf.repinneriterationscount; state->repouteriterationscount = state->qqpbuf.repouteriterationscount; state->repncholesky = state->qqpbuf.repncholesky; return; } /* * QP-DENSE-IPM and QP-SPARSE-IPM solvers */ if( state->algokind==5||state->algokind==6 ) { /* * Solve */ ae_assert(state->akind==0||state->akind==1, "MinQPOptimize: unexpected AKind", _state); if( state->akind==0 ) { cqmgeta(&state->a, &state->tmpr2, _state); } if( state->algokind==5 ) { vipminitdense(&state->vsolver, &state->effectives, &state->xorigin, n, _state); } if( state->algokind==6 ) { vipminitsparse(&state->vsolver, &state->effectives, &state->xorigin, n, _state); } vipmsetquadraticlinear(&state->vsolver, &state->tmpr2, &state->sparsea, state->akind, state->sparseaupper, &state->b, _state); vipmsetconstraints(&state->vsolver, &state->bndl, &state->bndu, &state->sparsec, state->msparse, &state->densec, state->mdense, &state->cl, &state->cu, _state); vipmsetcond(&state->vsolver, state->veps, state->veps, state->veps, _state); vipmoptimize(&state->vsolver, &state->xs, &state->replagbc, &state->replaglc, &state->repterminationtype, _state); state->repinneriterationscount = state->vsolver.repiterationscount; state->repouteriterationscount = state->vsolver.repiterationscount; state->repncholesky = state->vsolver.repncholesky; return; } /* * Integrity check failed - unknown solver */ ae_assert(ae_false, "MinQPOptimize: integrity check failed - unknown solver", _state); } /************************************************************************* QP solver results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution. This array is allocated and initialized only when Rep.TerminationType parameter is positive (success). 
Rep - optimization report, contains: * completion code in Rep.TerminationType (positive values denote some kind of success, negative - failures) * Lagrange multipliers - for QP solvers which support them * other statistics See comments on minqpreport structure for more information -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpresults(minqpstate* state, /* Real */ ae_vector* x, minqpreport* rep, ae_state *_state) { ae_vector_clear(x); _minqpreport_clear(rep); minqpresultsbuf(state, x, rep, _state); } /************************************************************************* QP results Buffered implementation of MinQPResults() which uses pre-allocated buffer to store X[]. If the buffer size is too small, it resizes the buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpresultsbuf(minqpstate* state, /* Real */ ae_vector* x, minqpreport* rep, ae_state *_state) { ae_int_t i; ae_assert(state->xs.cnt>=state->n, "MinQPResultsBuf: integrity check failed", _state); ae_assert(state->replagbc.cnt>=state->n, "MinQPResultsBuf: integrity check failed", _state); ae_assert(state->replaglc.cnt>=state->mdense+state->msparse, "MinQPResultsBuf: integrity check failed", _state); rvectorsetlengthatleast(x, state->n, _state); rvectorsetlengthatleast(&rep->lagbc, state->n, _state); rvectorsetlengthatleast(&rep->laglc, state->mdense+state->msparse, _state); for(i=0; i<=state->n-1; i++) { x->ptr.p_double[i] = state->xs.ptr.p_double[i]; rep->lagbc.ptr.p_double[i] = state->replagbc.ptr.p_double[i]; } for(i=0; i<=state->mdense+state->msparse-1; i++) { rep->laglc.ptr.p_double[i] = state->replaglc.ptr.p_double[i]; } rep->inneriterationscount = state->repinneriterationscount; rep->outeriterationscount = state->repouteriterationscount; rep->nmv = state->repnmv; rep->ncholesky = state->repncholesky; rep->terminationtype = state->repterminationtype; } /************************************************************************* Fast version of MinQPSetLinearTerm(), which doesn't check its arguments. For internal use only. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetlineartermfast(minqpstate* state, /* Real */ ae_vector* b, ae_state *_state) { ae_v_move(&state->b.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); } /************************************************************************* Fast version of MinQPSetQuadraticTerm(), which doesn't check its arguments. It accepts an additional parameter - shift S, which allows to "shift" matrix A by adding s*I to A. S must be positive (although it is not checked). For internal use only. 
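Informally, after this call the quadratic term stored in State corresponds to the shifted matrix A+s*I: the code below copies A into the internal CQM model and then rewrites its diagonal with A[i][i]+s. It also accumulates max|A[i][j]|, sum|A[i][j]| and the sum of squared |A[i][j]|, which are used later by the quadratic penalty machinery (see the comment in the code).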
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetquadratictermfast(minqpstate* state, /* Real */ ae_matrix* a, ae_bool isupper, double s, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t n; double v; ae_int_t j0; ae_int_t j1; n = state->n; state->akind = 0; cqmseta(&state->a, a, isupper, 1.0, _state); if( ae_fp_greater(s,(double)(0)) ) { rvectorsetlengthatleast(&state->tmp0, n, _state); for(i=0; i<=n-1; i++) { state->tmp0.ptr.p_double[i] = a->ptr.pp_double[i][i]+s; } cqmrewritedensediagonal(&state->a, &state->tmp0, _state); } /* * Estimate norm of A * (it will be used later in the quadratic penalty function) */ state->absamax = (double)(0); state->absasum = (double)(0); state->absasum2 = (double)(0); for(i=0; i<=n-1; i++) { if( isupper ) { j0 = i; j1 = n-1; } else { j0 = 0; j1 = i; } for(j=j0; j<=j1; j++) { v = ae_fabs(a->ptr.pp_double[i][j], _state); state->absamax = ae_maxreal(state->absamax, v, _state); state->absasum = state->absasum+v; state->absasum2 = state->absasum2+v*v; } } } /************************************************************************* Internal function which allows to rewrite diagonal of quadratic term. For internal use only. This function can be used only when you have dense A and already made MinQPSetQuadraticTerm(Fast) call. -- ALGLIB -- Copyright 16.01.2011 by Bochkanov Sergey *************************************************************************/ void minqprewritediagonal(minqpstate* state, /* Real */ ae_vector* s, ae_state *_state) { cqmrewritedensediagonal(&state->a, s, _state); } /************************************************************************* Fast version of MinQPSetStartingPoint(), which doesn't check its arguments. For internal use only. -- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetstartingpointfast(minqpstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; n = state->n; ae_v_move(&state->startx.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); state->havex = ae_true; } /************************************************************************* Fast version of MinQPSetOrigin(), which doesn't check its arguments. For internal use only. 
-- ALGLIB -- Copyright 11.01.2011 by Bochkanov Sergey *************************************************************************/ void minqpsetoriginfast(minqpstate* state, /* Real */ ae_vector* xorigin, ae_state *_state) { ae_int_t n; n = state->n; ae_v_move(&state->xorigin.ptr.p_double[0], 1, &xorigin->ptr.p_double[0], 1, ae_v_len(0,n-1)); } void _minqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minqpstate *p = (minqpstate*)_p; ae_touch_ptr((void*)p); _qqpsettings_init(&p->qqpsettingsuser, _state, make_automatic); _qpbleicsettings_init(&p->qpbleicsettingsuser, _state, make_automatic); _qpdenseaulsettings_init(&p->qpdenseaulsettingsuser, _state, make_automatic); _convexquadraticmodel_init(&p->a, _state, make_automatic); _sparsematrix_init(&p->sparsea, _state, make_automatic); ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->xorigin, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->startx, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->densec, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsec, _state, make_automatic); ae_vector_init(&p->cl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->replagbc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->replaglc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->effectives, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->ecleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->elaglc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->elagmlt, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->elagidx, 0, DT_INT, _state, make_automatic); ae_matrix_init(&p->dummyr2, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->dummysparse, _state, make_automatic); ae_matrix_init(&p->tmpr2, 0, 0, DT_REAL, _state, make_automatic); _qpbleicbuffers_init(&p->qpbleicbuf, _state, make_automatic); _qqpbuffers_init(&p->qqpbuf, _state, make_automatic); _qpdenseaulbuffers_init(&p->qpdenseaulbuf, _state, make_automatic); _vipmstate_init(&p->vsolver, _state, make_automatic); } void _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minqpstate *dst = (minqpstate*)_dst; minqpstate *src = (minqpstate*)_src; dst->n = src->n; _qqpsettings_init_copy(&dst->qqpsettingsuser, &src->qqpsettingsuser, _state, make_automatic); _qpbleicsettings_init_copy(&dst->qpbleicsettingsuser, &src->qpbleicsettingsuser, _state, make_automatic); _qpdenseaulsettings_init_copy(&dst->qpdenseaulsettingsuser, &src->qpdenseaulsettingsuser, _state, make_automatic); dst->veps = src->veps; dst->dbgskipconstraintnormalization = src->dbgskipconstraintnormalization; dst->algokind = src->algokind; dst->akind = src->akind; _convexquadraticmodel_init_copy(&dst->a, &src->a, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsea, &src->sparsea, _state, make_automatic); dst->sparseaupper = src->sparseaupper; dst->absamax = src->absamax; dst->absasum = src->absasum; dst->absasum2 = src->absasum2; ae_vector_init_copy(&dst->b, &src->b, _state, 
make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); dst->stype = src->stype; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic); ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic); ae_vector_init_copy(&dst->xorigin, &src->xorigin, _state, make_automatic); ae_vector_init_copy(&dst->startx, &src->startx, _state, make_automatic); dst->havex = src->havex; ae_matrix_init_copy(&dst->densec, &src->densec, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsec, &src->sparsec, _state, make_automatic); ae_vector_init_copy(&dst->cl, &src->cl, _state, make_automatic); ae_vector_init_copy(&dst->cu, &src->cu, _state, make_automatic); dst->mdense = src->mdense; dst->msparse = src->msparse; ae_vector_init_copy(&dst->xs, &src->xs, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repncholesky = src->repncholesky; dst->repnmv = src->repnmv; dst->repterminationtype = src->repterminationtype; ae_vector_init_copy(&dst->replagbc, &src->replagbc, _state, make_automatic); ae_vector_init_copy(&dst->replaglc, &src->replaglc, _state, make_automatic); ae_vector_init_copy(&dst->effectives, &src->effectives, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_matrix_init_copy(&dst->ecleic, &src->ecleic, _state, make_automatic); ae_vector_init_copy(&dst->elaglc, &src->elaglc, _state, make_automatic); ae_vector_init_copy(&dst->elagmlt, &src->elagmlt, _state, make_automatic); ae_vector_init_copy(&dst->elagidx, &src->elagidx, _state, make_automatic); ae_matrix_init_copy(&dst->dummyr2, &src->dummyr2, _state, make_automatic); _sparsematrix_init_copy(&dst->dummysparse, &src->dummysparse, _state, make_automatic); ae_matrix_init_copy(&dst->tmpr2, &src->tmpr2, _state, make_automatic); dst->qpbleicfirstcall = src->qpbleicfirstcall; _qpbleicbuffers_init_copy(&dst->qpbleicbuf, &src->qpbleicbuf, _state, make_automatic); _qqpbuffers_init_copy(&dst->qqpbuf, &src->qqpbuf, _state, make_automatic); _qpdenseaulbuffers_init_copy(&dst->qpdenseaulbuf, &src->qpdenseaulbuf, _state, make_automatic); _vipmstate_init_copy(&dst->vsolver, &src->vsolver, _state, make_automatic); } void _minqpstate_clear(void* _p) { minqpstate *p = (minqpstate*)_p; ae_touch_ptr((void*)p); _qqpsettings_clear(&p->qqpsettingsuser); _qpbleicsettings_clear(&p->qpbleicsettingsuser); _qpdenseaulsettings_clear(&p->qpdenseaulsettingsuser); _convexquadraticmodel_clear(&p->a); _sparsematrix_clear(&p->sparsea); ae_vector_clear(&p->b); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->s); ae_vector_clear(&p->havebndl); ae_vector_clear(&p->havebndu); ae_vector_clear(&p->xorigin); ae_vector_clear(&p->startx); ae_matrix_clear(&p->densec); _sparsematrix_clear(&p->sparsec); ae_vector_clear(&p->cl); ae_vector_clear(&p->cu); ae_vector_clear(&p->xs); ae_vector_clear(&p->replagbc); ae_vector_clear(&p->replaglc); ae_vector_clear(&p->effectives); ae_vector_clear(&p->tmp0); ae_matrix_clear(&p->ecleic); ae_vector_clear(&p->elaglc); ae_vector_clear(&p->elagmlt); ae_vector_clear(&p->elagidx); ae_matrix_clear(&p->dummyr2); _sparsematrix_clear(&p->dummysparse); ae_matrix_clear(&p->tmpr2); _qpbleicbuffers_clear(&p->qpbleicbuf); _qqpbuffers_clear(&p->qqpbuf); _qpdenseaulbuffers_clear(&p->qpdenseaulbuf); 
_vipmstate_clear(&p->vsolver); } void _minqpstate_destroy(void* _p) { minqpstate *p = (minqpstate*)_p; ae_touch_ptr((void*)p); _qqpsettings_destroy(&p->qqpsettingsuser); _qpbleicsettings_destroy(&p->qpbleicsettingsuser); _qpdenseaulsettings_destroy(&p->qpdenseaulsettingsuser); _convexquadraticmodel_destroy(&p->a); _sparsematrix_destroy(&p->sparsea); ae_vector_destroy(&p->b); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->s); ae_vector_destroy(&p->havebndl); ae_vector_destroy(&p->havebndu); ae_vector_destroy(&p->xorigin); ae_vector_destroy(&p->startx); ae_matrix_destroy(&p->densec); _sparsematrix_destroy(&p->sparsec); ae_vector_destroy(&p->cl); ae_vector_destroy(&p->cu); ae_vector_destroy(&p->xs); ae_vector_destroy(&p->replagbc); ae_vector_destroy(&p->replaglc); ae_vector_destroy(&p->effectives); ae_vector_destroy(&p->tmp0); ae_matrix_destroy(&p->ecleic); ae_vector_destroy(&p->elaglc); ae_vector_destroy(&p->elagmlt); ae_vector_destroy(&p->elagidx); ae_matrix_destroy(&p->dummyr2); _sparsematrix_destroy(&p->dummysparse); ae_matrix_destroy(&p->tmpr2); _qpbleicbuffers_destroy(&p->qpbleicbuf); _qqpbuffers_destroy(&p->qqpbuf); _qpdenseaulbuffers_destroy(&p->qpdenseaulbuf); _vipmstate_destroy(&p->vsolver); } void _minqpreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minqpreport *p = (minqpreport*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->lagbc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->laglc, 0, DT_REAL, _state, make_automatic); } void _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minqpreport *dst = (minqpreport*)_dst; minqpreport *src = (minqpreport*)_src; dst->inneriterationscount = src->inneriterationscount; dst->outeriterationscount = src->outeriterationscount; dst->nmv = src->nmv; dst->ncholesky = src->ncholesky; dst->terminationtype = src->terminationtype; ae_vector_init_copy(&dst->lagbc, &src->lagbc, _state, make_automatic); ae_vector_init_copy(&dst->laglc, &src->laglc, _state, make_automatic); } void _minqpreport_clear(void* _p) { minqpreport *p = (minqpreport*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->lagbc); ae_vector_clear(&p->laglc); } void _minqpreport_destroy(void* _p) { minqpreport *p = (minqpreport*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->lagbc); ae_vector_destroy(&p->laglc); } #endif #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD) void dsssettingsinit(dualsimplexsettings* settings, ae_state *_state) { settings->pivottol = 10*ae_sqrt(ae_machineepsilon, _state); settings->perturbmag = 10*settings->pivottol; settings->maxtrfage = reviseddualsimplex_defaultmaxtrfage; settings->trftype = 3; settings->ratiotest = 1; settings->pricing = 1; settings->shifting = 2; } /************************************************************************* This function initializes DSS structure. Previously allocated memory is reused as much as possible. Default state of the problem is zero cost vector, all variables are fixed at zero. 
-- ALGLIB -- Copyright 01.07.2018 by Bochkanov Sergey *************************************************************************/ void dssinit(ae_int_t n, dualsimplexstate* s, ae_state *_state) { ae_int_t i; ae_assert(n>0, "DSSInit: N<=0", _state); s->ns = n; s->m = 0; rvectorsetlengthatleast(&s->varscales, n, _state); rvectorsetlengthatleast(&s->rawbndl, n, _state); rvectorsetlengthatleast(&s->rawbndu, n, _state); for(i=0; i<=n-1; i++) { s->varscales.ptr.p_double[i] = 1.0; s->rawbndl.ptr.p_double[i] = 0.0; s->rawbndu.ptr.p_double[i] = 0.0; } reviseddualsimplex_subprobleminit(n, &s->primary, _state); reviseddualsimplex_basisinit(n, 0, &s->basis, _state); rvectorsetlengthatleast(&s->repx, n, _state); rvectorsetlengthatleast(&s->repdx, 0, _state); ivectorsetlengthatleast(&s->repstats, n, _state); for(i=0; i<=n-1; i++) { s->repx.ptr.p_double[i] = 0.0; s->repstats.ptr.p_int[i] = 1; } } /************************************************************************* This function specifies the LP problem. INPUT PARAMETERS: State - structure previously allocated with minlpcreate() call. BndL - lower bounds, array[N]. BndU - upper bounds, array[N]. SV - scales, array[N]. DenseA - dense array[K,N], dense linear constraints SparseA - sparse linear constraints, sparsematrix[K,N] in CRS format AKind - type of A: 0 for dense, 1 for sparse AL, AU - lower and upper bounds, array[K] K - number of equality/inequality constraints, K>=0. ProposedBasis- basis to import from (if BasisInitType=2) BasisInitType- what to do with basis: * 0 - set new basis to all-logicals * 1 - try to reuse previous basis as much as possible * 2 - try to import basis from ProposedBasis Settings- algorithm settings -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void dsssetproblem(dualsimplexstate* state, /* Real */ ae_vector* c, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* sv, /* Real */ ae_matrix* densea, sparsematrix* sparsea, ae_int_t akind, /* Real */ ae_vector* al, /* Real */ ae_vector* au, ae_int_t k, dualsimplexbasis* proposedbasis, ae_int_t basisinittype, dualsimplexsettings* settings, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t jj; ae_int_t offs; ae_int_t ns; ae_int_t j0; ae_int_t j1; ae_bool processed; ae_int_t oldm; ae_bool basisinitialized; double v; double vv; ns = state->primary.ns; oldm = state->primary.m; /* * Integrity checks */ ae_assert(bndl->cnt>=ns, "DSSSetProblem: Length(BndL)<NS", _state); ae_assert(bndu->cnt>=ns, "DSSSetProblem: Length(BndU)<NS", _state); ae_assert(sv->cnt>=ns, "DSSSetProblem: Length(SV)<NS", _state); ae_assert(c->cnt>=ns, "SubproblemSetCost: Length(C)<N", _state); ae_assert(k>=0, "DSSSetProblem: K<0", _state); if( k>0&&akind==1 ) { ae_assert(sparsea->m==k, "DSSSetProblem: rows(A)<>K", _state); ae_assert(sparsea->n==ns, "DSSSetProblem: cols(A)<>N", _state); } /* * Downgrade state */ reviseddualsimplex_downgradestate(&state->primary, reviseddualsimplex_ssinvalid, _state); /* * Reallocate storage */ rvectorgrowto(&state->primary.bndl, ns+k, _state); rvectorgrowto(&state->primary.bndu, ns+k, _state); ivectorgrowto(&state->primary.bndt, ns+k, _state); rvectorgrowto(&state->primary.rawc, ns+k, _state); rvectorgrowto(&state->primary.effc, ns+k, _state); rvectorgrowto(&state->primary.xa, ns+k, _state); rvectorgrowto(&state->primary.d, ns+k, _state); rvectorgrowto(&state->primary.xb, k, _state); rvectorgrowto(&state->primary.bndlb, k, _state); rvectorgrowto(&state->primary.bndub, k, _state); ivectorgrowto(&state->primary.bndtb, k, _state); /* * Save original problem formulation */ state->ns = ns; 
state->m = k; for(i=0; i<=ns-1; i++) { state->varscales.ptr.p_double[i] = sv->ptr.p_double[i]; state->rawbndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->rawbndu.ptr.p_double[i] = bndu->ptr.p_double[i]; } /* * Setup cost, scale and box constraints */ for(i=0; i<=ns-1; i++) { ae_assert(sv->ptr.p_double[i]>0, "DSSSetProblem: SV<=0", _state); state->primary.rawc.ptr.p_double[i] = c->ptr.p_double[i]*sv->ptr.p_double[i]; state->primary.effc.ptr.p_double[i] = c->ptr.p_double[i]*sv->ptr.p_double[i]; } for(i=0; i<=ns-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "DSSSetProblem: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "DSSSetProblem: BndU contains NAN or -INF", _state); state->primary.bndl.ptr.p_double[i] = bndl->ptr.p_double[i]/sv->ptr.p_double[i]; state->primary.bndu.ptr.p_double[i] = bndu->ptr.p_double[i]/sv->ptr.p_double[i]; /* * Set bound type */ if( ae_isfinite(bndl->ptr.p_double[i], _state)&&ae_isfinite(bndu->ptr.p_double[i], _state) ) { if( ae_fp_greater(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_ccinfeasible; } if( ae_fp_less(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_ccrange; } if( ae_fp_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_ccfixed; } continue; } if( ae_isfinite(bndl->ptr.p_double[i], _state)&&!ae_isfinite(bndu->ptr.p_double[i], _state) ) { state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_cclower; continue; } if( !ae_isfinite(bndl->ptr.p_double[i], _state)&&ae_isfinite(bndu->ptr.p_double[i], _state) ) { state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_ccupper; continue; } ae_assert(ae_isneginf(bndl->ptr.p_double[i], _state)&&ae_isposinf(bndu->ptr.p_double[i], _state), "DSSSetProblem: integrity check failed", _state); state->primary.bndt.ptr.p_int[i] = reviseddualsimplex_ccfree; } /* * Quick exit if no linear constraints are present */ if( k==0 ) { state->primary.m = 0; reviseddualsimplex_basisinit(state->primary.ns, state->primary.m, &state->basis, _state); return; } /* * Extend A with structural terms and transpose it: * * allocate place for A^T extended with logical part. * * copy with transposition * * perform integrity check for array sizes * * manually append new items * * update DIdx/UIdx */ processed = ae_false; state->primary.m = k; if( akind==0 ) { ae_assert(ae_false, "DSSSetProblem: does not support dense inputs yet", _state); } if( akind==1 ) { /* * Transpose constraints matrix, apply column and row scaling. * Extend it with identity submatrix. * * NOTE: in order to improve stability of LU factorization we * normalize rows using 2-norm, not INF-norm. Having rows * normalized with 2-norm makes every element less than * 1.0 in magnitude, which allows us later to move logical * columns to the beginning of LU factors without losing * stability. */ rvectorsetlengthatleast(&state->at.vals, sparsea->ridx.ptr.p_int[k]+k, _state); ivectorsetlengthatleast(&state->at.idx, sparsea->ridx.ptr.p_int[k]+k, _state); ivectorsetlengthatleast(&state->at.ridx, ns+k+1, _state); ivectorsetlengthatleast(&state->at.didx, ns+k, _state); ivectorsetlengthatleast(&state->at.uidx, ns+k, _state); sparsecopytransposecrsbuf(sparsea, &state->at, _state); rvectorsetlengthatleast(&state->rowscales, k, _state); for(i=0; i<=k-1; i++) { state->rowscales.ptr.p_double[i] = (double)(0); } for(i=0; i<=ns-1; i++) { j0 = state->at.ridx.ptr.p_int[i]; j1 = state->at.ridx.ptr.p_int[i+1]-1; v = sv->ptr.p_double[i]; for(j=j0; j<=j1; j++) { vv = v*state->at.vals.ptr.p_double[j]; jj = state->at.idx.ptr.p_int[j]; state->at.vals.ptr.p_double[j] = vv; state->rowscales.ptr.p_double[jj] = state->rowscales.ptr.p_double[jj]+vv*vv; } } rvectorsetlengthatleast(&state->tmp0, k, _state); for(i=0; i<=k-1; i++) { state->rowscales.ptr.p_double[i] = coalesce(ae_sqrt(state->rowscales.ptr.p_double[i], _state), (double)(1), _state); state->tmp0.ptr.p_double[i] = 1/state->rowscales.ptr.p_double[i]; } for(i=0; i<=ns-1; i++) { j0 = state->at.ridx.ptr.p_int[i]; j1 = state->at.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { state->at.vals.ptr.p_double[j] = state->at.vals.ptr.p_double[j]*state->tmp0.ptr.p_double[state->at.idx.ptr.p_int[j]]; } } ae_assert(state->at.vals.cnt>=sparsea->ridx.ptr.p_int[k]+k, "DSSSetProblem: integrity check failed", _state); ae_assert(state->at.idx.cnt>=sparsea->ridx.ptr.p_int[k]+k, "DSSSetProblem: integrity check failed", _state); ae_assert(state->at.ridx.cnt>=ns+k+1, "DSSSetProblem: integrity check failed", _state); ae_assert(state->at.didx.cnt>=ns+k, "DSSSetProblem: integrity check failed", _state); ae_assert(state->at.uidx.cnt>=ns+k, "DSSSetProblem: integrity check failed", _state); offs = state->at.ridx.ptr.p_int[ns]; for(i=0; i<=k-1; i++) { state->at.vals.ptr.p_double[offs+i] = -1.0; state->at.idx.ptr.p_int[offs+i] = i; state->at.ridx.ptr.p_int[ns+i+1] = state->at.ridx.ptr.p_int[ns+i]+1; state->at.ninitialized = state->at.ninitialized+1; } state->at.m = state->at.m+k; sparseinitduidx(&state->at, _state); processed = ae_true; } ae_assert(processed, "DSSSetProblem: integrity check failed (akind)", _state); /* * Copy AL, AU to BndL/BndT */ for(i=0; i<=k-1; i++) { ae_assert(ae_isfinite(al->ptr.p_double[i], _state)||ae_isneginf(al->ptr.p_double[i], _state), "DSSSetProblem: AL contains NAN or +INF", _state); ae_assert(ae_isfinite(au->ptr.p_double[i], _state)||ae_isposinf(au->ptr.p_double[i], _state), "DSSSetProblem: AU contains NAN or -INF", _state); state->primary.bndl.ptr.p_double[ns+i] = al->ptr.p_double[i]/state->rowscales.ptr.p_double[i]; state->primary.bndu.ptr.p_double[ns+i] = au->ptr.p_double[i]/state->rowscales.ptr.p_double[i]; /* * Set bound type */ if( ae_isfinite(al->ptr.p_double[i], _state)&&ae_isfinite(au->ptr.p_double[i], _state) ) { if( ae_fp_greater(al->ptr.p_double[i],au->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_ccinfeasible; } if( ae_fp_less(al->ptr.p_double[i],au->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_ccrange; } if( ae_fp_eq(al->ptr.p_double[i],au->ptr.p_double[i]) ) { state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_ccfixed; } continue; } if( ae_isfinite(al->ptr.p_double[i], _state)&&!ae_isfinite(au->ptr.p_double[i], _state) ) { state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_cclower; continue; } if( !ae_isfinite(al->ptr.p_double[i], 
_state)&&ae_isfinite(au->ptr.p_double[i], _state) ) { state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_ccupper; continue; } ae_assert(ae_isneginf(al->ptr.p_double[i], _state)&&ae_isposinf(au->ptr.p_double[i], _state), "DSSSetProblem: integrity check failed", _state); state->primary.bndt.ptr.p_int[ns+i] = reviseddualsimplex_ccfree; } /* * Depending on BasisInitType either start from all-logical basis * or try to reuse already existing basis. * * NOTE: current version does not support basis shrinkage, only * growing basis can be reused. */ basisinitialized = ae_false; if( basisinittype==2 ) { /* * Import basis from one proposed by caller */ ae_assert(proposedbasis->ns==state->primary.ns, "DSSSetProblemX: unable to import basis, sizes do not match", _state); ae_assert(proposedbasis->m==state->primary.m, "DSSSetProblemX: unable to import basis, sizes do not match", _state); basisinitialized = reviseddualsimplex_basistryimportfrom(&state->basis, proposedbasis, &state->at, settings, _state); } if( basisinittype==1&&state->primary.m>=oldm ) { /* * New rows were added, try to reuse previous basis */ for(i=oldm; i<=state->primary.m-1; i++) { state->primary.rawc.ptr.p_double[ns+i] = 0.0; state->primary.effc.ptr.p_double[ns+i] = 0.0; state->primary.xa.ptr.p_double[ns+i] = 0.0; state->primary.d.ptr.p_double[ns+i] = 0.0; } basisinitialized = reviseddualsimplex_basistryresize(&state->basis, state->primary.m, &state->at, settings, _state); } if( !basisinitialized ) { /* * Straightforward code for all-logicals basis */ for(i=0; i<=k-1; i++) { state->primary.rawc.ptr.p_double[ns+i] = 0.0; state->primary.effc.ptr.p_double[ns+i] = 0.0; state->primary.xa.ptr.p_double[ns+i] = 0.0; state->primary.d.ptr.p_double[ns+i] = 0.0; } reviseddualsimplex_basisinit(state->primary.ns, state->primary.m, &state->basis, _state); reviseddualsimplex_basisfreshtrf(&state->basis, &state->at, settings, _state); } rvectorgrowto(&state->repy, state->primary.m, _state); rvectorgrowto(&state->repdx, state->primary.m, _state); ivectorgrowto(&state->repstats, state->primary.ns+state->primary.m, _state); } /************************************************************************* This function exports basis from the primary (phase II) subproblem. INPUT PARAMETERS: State - structure OUTPUT PARAMETERS: Basis - current basis exported (no factorization, only the set of basic/nonbasic variables) -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void dssexportbasis(dualsimplexstate* state, dualsimplexbasis* basis, ae_state *_state) { reviseddualsimplex_basisexportto(&state->basis, basis, _state); } /************************************************************************* This function performs the actual solution process. INPUT PARAMETERS: State - state Solution results can be found in fields of State which are explicitly declared as accessible by external code. 
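A typical driver sequence, given as an informal sketch that uses only functions defined in this unit (local array and structure names are illustrative; with AKind=1 the DenseA argument is not accessed and may be any initialized ae_matrix, and ProposedBasis is only read when BasisInitType=2):

    dsssettingsinit(&settings, _state);
    dssinit(n, &s, _state);
    dsssetproblem(&s, &c, &bndl, &bndu, &sv, &densea, &sparsea, 1, &al, &au, k, &dummybasis, 0, &settings, _state);
    dssoptimize(&s, &settings, _state);

On exit, S.RepTerminationType is positive on success, S.RepF holds the target value, S.RepX the primal solution, S.RepY/S.RepDX the dual information, and S.RepStats the variable status flags.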
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void dssoptimize(dualsimplexstate* state, dualsimplexsettings* settings, ae_state *_state) { ae_frame _frame_block; ae_int_t nx; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; double v; hqrndstate rs; ae_frame_make(_state, &_frame_block); memset(&rs, 0, sizeof(rs)); _hqrndstate_init(&rs, _state, ae_true); nx = state->primary.ns+state->primary.m; m = state->primary.m; /* * Init report fields */ state->repf = (double)(0); state->repiterationscount = 0; state->repiterationscount1 = 0; state->repiterationscount2 = 0; state->repiterationscount3 = 0; state->repterminationtype = 1; state->repprimalerror = (double)(0); state->repdualerror = (double)(0); /* * Handle case when M=0; after this block we assume that M>0. */ if( m==0 ) { reviseddualsimplex_solveboxonly(state, _state); state->repf = (double)(0); for(i=0; i<=state->primary.ns-1; i++) { state->repf = state->repf+state->repx.ptr.p_double[i]*state->primary.rawc.ptr.p_double[i]; } reviseddualsimplex_unscaleandenforce(&state->repx, &state->repy, &state->repdx, state, _state); ae_frame_leave(_state); return; } /* * Most basic check for correctness of box constraints */ for(i=0; i<=nx-1; i++) { if( state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccinfeasible ) { state->repterminationtype = -3; reviseddualsimplex_setzeroxystats(state, _state); ae_frame_leave(_state); return; } } /* * Initialization: * * column scales, initial perturbed C[] */ hqrndseed(7456, 2355, &rs, _state); rvectorsetlengthatleast(&state->primary.colscales, nx, _state); for(i=0; i<=nx-1; i++) { /* * compute column scales */ v = ae_fabs(state->primary.rawc.ptr.p_double[i], _state); j0 = state->at.ridx.ptr.p_int[i]; j1 = state->at.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { v = ae_maxreal(v, ae_fabs(state->at.vals.ptr.p_double[j], _state), _state); } state->primary.colscales.ptr.p_double[i] = coalesce(v, 1.0, _state); /* * apply perturbation */ if( !reviseddualsimplex_isfree(&state->primary, i, _state) ) { v = settings->perturbmag*state->primary.colscales.ptr.p_double[i]*(1+hqrnduniformr(&rs, _state)); if( reviseddualsimplex_hasbndu(&state->primary, i, _state) ) { v = -v; } state->primary.effc.ptr.p_double[i] = state->primary.rawc.ptr.p_double[i]+v; } } /* * Solve phase 1 subproblem, then perturbed subproblem */ reviseddualsimplex_basisfreshtrf(&state->basis, &state->at, settings, _state); if( state->primary.state==reviseddualsimplex_ssinvalid ) { reviseddualsimplex_subprobleminferinitialxn(state, &state->primary, _state); } if( state->primary.state==reviseddualsimplex_ssvalidxn ) { reviseddualsimplex_subproblemhandlexnupdate(state, &state->primary, _state); } ae_assert(state->primary.state==reviseddualsimplex_ssvalid, "DSS: integrity check failed (init)", _state); reviseddualsimplex_invokephase1(state, settings, _state); if( state->repterminationtype<=0 ) { /* * Primal unbounded, dual infeasible */ ae_assert(state->repterminationtype==-4, "DSS: integrity check for InvokePhase1() result failed", _state); state->repf = (double)(0); state->repprimalerror = (double)(0); state->repdualerror = (double)(0); reviseddualsimplex_setzeroxystats(state, _state); ae_frame_leave(_state); return; } reviseddualsimplex_solvesubproblemdual(state, &state->primary, ae_false, settings, &state->repterminationtype, _state); if( state->repterminationtype<=0 ) { /* * Primal infeasible */ ae_assert(state->repterminationtype==-3, "DSS: integrity check for 
SolveSubproblemDual() result failed", _state); state->repf = (double)(0); state->repprimalerror = (double)(0); state->repdualerror = (double)(0); reviseddualsimplex_setzeroxystats(state, _state); ae_frame_leave(_state); return; } /* * Remove perturbation from the cost vector, * then use primal simplex to enforce dual feasibility * after removal of the perturbation (if necessary). */ reviseddualsimplex_subprobleminitphase3(&state->primary, &state->phase3, _state); for(i=0; i<=nx-1; i++) { state->phase3.effc.ptr.p_double[i] = state->primary.rawc.ptr.p_double[i]; } ae_assert(state->phase3.state>=reviseddualsimplex_ssvalidxn, "DSS: integrity check failed (remove perturbation)", _state); reviseddualsimplex_subproblemhandlexnupdate(state, &state->phase3, _state); reviseddualsimplex_solvesubproblemprimal(state, &state->phase3, settings, &state->repterminationtype, _state); if( state->repterminationtype<=0 ) { /* * Dual infeasible, primal unbounded */ ae_assert(state->repterminationtype==-4, "DSS: integrity check for SolveSubproblemPrimal() result failed", _state); state->repf = (double)(0); state->repprimalerror = (double)(0); state->repdualerror = (double)(0); reviseddualsimplex_setzeroxystats(state, _state); ae_frame_leave(_state); return; } for(i=0; i<=nx-1; i++) { state->primary.xa.ptr.p_double[i] = state->phase3.xa.ptr.p_double[i]; if( reviseddualsimplex_hasbndl(&state->primary, i, _state) ) { state->primary.xa.ptr.p_double[i] = ae_maxreal(state->primary.xa.ptr.p_double[i], state->primary.bndl.ptr.p_double[i], _state); } if( reviseddualsimplex_hasbndu(&state->primary, i, _state) ) { state->primary.xa.ptr.p_double[i] = ae_minreal(state->primary.xa.ptr.p_double[i], state->primary.bndu.ptr.p_double[i], _state); } } /* * Primal and dual feasible, problem solved */ rvectorsetlengthatleast(&state->tmp0, m, _state); rvectorsetlengthatleast(&state->tmp1, m, _state); state->repf = (double)(0); for(i=0; i<=state->primary.ns-1; i++) { state->repf = state->repf+state->primary.xa.ptr.p_double[i]*state->primary.rawc.ptr.p_double[i]; } state->repprimalerror = (double)(0); state->repdualerror = (double)(0); state->repterminationtype = 1; for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = state->primary.rawc.ptr.p_double[state->basis.idx.ptr.p_int[i]]; } reviseddualsimplex_basissolvet(&state->basis, &state->tmp0, &state->tmp1, &state->tmp2, _state); reviseddualsimplex_computeantv(state, &state->tmp1, &state->primary.d, _state); for(i=0; i<=state->ns-1; i++) { j = state->basis.nidx.ptr.p_int[i]; state->primary.d.ptr.p_double[j] = state->primary.rawc.ptr.p_double[j]-state->primary.d.ptr.p_double[j]; } for(i=0; i<=m-1; i++) { state->repy.ptr.p_double[i] = state->tmp1.ptr.p_double[i]; state->repdx.ptr.p_double[i] = state->primary.d.ptr.p_double[state->ns+i]; } for(i=0; i<=nx-1; i++) { if( state->basis.isbasic.ptr.p_bool[i] ) { state->repstats.ptr.p_int[i] = 0; continue; } if( reviseddualsimplex_hasbndl(&state->primary, i, _state)&&ae_fp_eq(state->primary.xa.ptr.p_double[i],state->primary.bndl.ptr.p_double[i]) ) { state->repstats.ptr.p_int[i] = -1; continue; } if( reviseddualsimplex_hasbndu(&state->primary, i, _state)&&ae_fp_eq(state->primary.xa.ptr.p_double[i],state->primary.bndu.ptr.p_double[i]) ) { state->repstats.ptr.p_int[i] = 1; continue; } ae_assert(!reviseddualsimplex_hasbndl(&state->primary, i, _state)&&!reviseddualsimplex_hasbndu(&state->primary, i, _state), "DSSOptimize: integrity check failed (zetta5)", _state); state->repstats.ptr.p_int[i] = 0; } for(i=0; i<=state->primary.ns-1; i++) { 
state->repx.ptr.p_double[i] = state->primary.xa.ptr.p_double[i]; } reviseddualsimplex_unscaleandenforce(&state->repx, &state->repy, &state->repdx, state, _state); ae_frame_leave(_state); } /************************************************************************* This function initializes subproblem structure. Previously allocated memory is reused as much as possible. Default state of the problem is zero cost vector, all variables are fixed at zero, linear constraint matrix is zero. -- ALGLIB -- Copyright 01.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_subprobleminit(ae_int_t n, dualsimplexsubproblem* s, ae_state *_state) { ae_int_t i; ae_assert(n>0, "SubproblemInit: N<=0", _state); s->ns = n; s->m = 0; s->state = reviseddualsimplex_ssinvalid; rvectorsetlengthatleast(&s->xa, n, _state); rvectorsetlengthatleast(&s->xb, 0, _state); rvectorsetlengthatleast(&s->d, n, _state); rvectorsetlengthatleast(&s->rawc, n, _state); rvectorsetlengthatleast(&s->effc, n, _state); rvectorsetlengthatleast(&s->bndl, n, _state); rvectorsetlengthatleast(&s->bndu, n, _state); ivectorsetlengthatleast(&s->bndt, n, _state); for(i=0; i<=n-1; i++) { s->rawc.ptr.p_double[i] = (double)(0); s->effc.ptr.p_double[i] = (double)(0); s->bndl.ptr.p_double[i] = (double)(0); s->bndu.ptr.p_double[i] = (double)(0); s->bndt.ptr.p_int[i] = reviseddualsimplex_ccfixed; s->xa.ptr.p_double[i] = 0.0; s->d.ptr.p_double[i] = 0.0; } } /************************************************************************* This function initializes phase #1 subproblem which minimizes sum of dual infeasibilities. It is required that total count of non-boxed non-fixed variables is at least M. It splits out basic components of XA[] to XB[] -- ALGLIB -- Copyright 01.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_subprobleminitphase1(dualsimplexsubproblem* s0, dualsimplexbasis* basis, dualsimplexsubproblem* s1, ae_state *_state) { ae_int_t i; s1->ns = s0->ns; s1->m = s0->m; copyrealarray(&s0->rawc, &s1->rawc, _state); copyrealarray(&s0->effc, &s1->effc, _state); copyrealarray(&s0->colscales, &s1->colscales, _state); copyrealarray(&s0->bndl, &s1->bndl, _state); copyrealarray(&s0->bndu, &s1->bndu, _state); copyintegerarray(&s0->bndt, &s1->bndt, _state); copyrealarray(&s0->xa, &s1->xa, _state); copyrealarray(&s0->xb, &s1->xb, _state); copyrealarray(&s0->bndlb, &s1->bndlb, _state); copyrealarray(&s0->bndub, &s1->bndub, _state); copyintegerarray(&s0->bndtb, &s1->bndtb, _state); copyrealarray(&s0->d, &s1->d, _state); for(i=0; i<=s1->ns+s1->m-1; i++) { if( s1->bndt.ptr.p_int[i]==reviseddualsimplex_cclower ) { s1->bndt.ptr.p_int[i] = reviseddualsimplex_ccrange; s1->bndl.ptr.p_double[i] = (double)(0); s1->bndu.ptr.p_double[i] = (double)(1); s1->xa.ptr.p_double[i] = (double)(0); continue; } if( s1->bndt.ptr.p_int[i]==reviseddualsimplex_ccupper ) { s1->bndt.ptr.p_int[i] = reviseddualsimplex_ccrange; s1->bndl.ptr.p_double[i] = (double)(-1); s1->bndu.ptr.p_double[i] = (double)(0); s1->xa.ptr.p_double[i] = (double)(0); continue; } if( s1->bndt.ptr.p_int[i]==reviseddualsimplex_ccfree ) { s1->bndt.ptr.p_int[i] = reviseddualsimplex_ccrange; s1->bndl.ptr.p_double[i] = (double)(-1); s1->bndu.ptr.p_double[i] = (double)(1); if( ae_fp_greater_eq(s1->effc.ptr.p_double[i],(double)(0)) ) { s1->xa.ptr.p_double[i] = (double)(-1); } else { s1->xa.ptr.p_double[i] = (double)(1); } continue; } s1->bndt.ptr.p_int[i] = 
reviseddualsimplex_ccfixed; s1->bndl.ptr.p_double[i] = (double)(0); s1->bndu.ptr.p_double[i] = (double)(0); s1->xa.ptr.p_double[i] = (double)(0); } s1->state = reviseddualsimplex_ssvalidxn; } /************************************************************************* This function initializes phase #3 subproblem which applies primal simplex method to the result of the phase #2. It also performs modification of the subproblem in order to ensure that initial point is primal feasible. NOTE: this function expects that all components (basic and nonbasic ones) are stored in XA[] -- ALGLIB -- Copyright 01.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_subprobleminitphase3(dualsimplexsubproblem* s0, dualsimplexsubproblem* s1, ae_state *_state) { s1->ns = s0->ns; s1->m = s0->m; copyrealarray(&s0->rawc, &s1->rawc, _state); copyrealarray(&s0->effc, &s1->effc, _state); copyrealarray(&s0->colscales, &s1->colscales, _state); copyrealarray(&s0->bndl, &s1->bndl, _state); copyrealarray(&s0->bndu, &s1->bndu, _state); copyintegerarray(&s0->bndt, &s1->bndt, _state); copyrealarray(&s0->xa, &s1->xa, _state); copyrealarray(&s0->xb, &s1->xb, _state); copyrealarray(&s0->bndlb, &s1->bndlb, _state); copyrealarray(&s0->bndub, &s1->bndub, _state); copyintegerarray(&s0->bndtb, &s1->bndtb, _state); copyrealarray(&s0->d, &s1->d, _state); s1->state = reviseddualsimplex_ssvalidxn; } /************************************************************************* This function infers nonbasic variables of X using sign of effective C[]. Only non-basic components of XN are changed; everything else is NOT updated. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_subprobleminferinitialxn(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state) { ae_int_t i; ae_int_t ii; ae_int_t bndt; for(ii=0; ii<=s->ns-1; ii++) { i = state->basis.nidx.ptr.p_int[ii]; bndt = s->bndt.ptr.p_int[i]; if( bndt==reviseddualsimplex_ccfixed||bndt==reviseddualsimplex_ccrange ) { if( s->effc.ptr.p_double[i]>=0 ) { s->xa.ptr.p_double[i] = s->bndl.ptr.p_double[i]; } else { s->xa.ptr.p_double[i] = s->bndu.ptr.p_double[i]; } continue; } if( bndt==reviseddualsimplex_cclower ) { s->xa.ptr.p_double[i] = s->bndl.ptr.p_double[i]; continue; } if( bndt==reviseddualsimplex_ccupper ) { s->xa.ptr.p_double[i] = s->bndu.ptr.p_double[i]; continue; } if( bndt==reviseddualsimplex_ccfree ) { s->xa.ptr.p_double[i] = 0.0; continue; } ae_assert(ae_false, "SubproblemInferInitialXN: integrity check failed (infeasible constraint)", _state); } s->state = reviseddualsimplex_ssvalidxn; } /************************************************************************* This function infers basic variables of X using values of non-basic vars and updates reduced cost vector D and target function Z. Sets state age to zero. D[] is allocated during computations. Temporary vectors Tmp0 and Tmp1 are used (reallocated as needed). NOTE: this function expects that both nonbasic and basic components are stored in XA[]. XB[] array is not referenced. 
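NOTE: in matrix notation (with B denoting the basis matrix, A_N the nonbasic columns of the constraint matrix, and C_B/C_N the basic/nonbasic parts of the effective cost vector EffC - notation introduced here only for explanation), the computation below amounts to solving B*XB = -A_N*XN for the basic components and to evaluating the nonbasic reduced costs as D_N = C_N - A_N'*inv(B')*C_B.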
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_subproblemhandlexnupdate(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t m; ae_int_t nn; ae_assert(s->state>=reviseddualsimplex_ssvalidxn, "SubproblemHandleXNUpdate: integrity check failed (XN is not valid)", _state); nn = s->ns; m = s->m; /* * Compute nonbasic components */ reviseddualsimplex_computeanxn(state, s, &s->xa, &state->tmp0, _state); reviseddualsimplex_basissolve(&state->basis, &state->tmp0, &state->tmp1, &state->tmp2, _state); for(i=0; i<=m-1; i++) { s->xa.ptr.p_double[state->basis.idx.ptr.p_int[i]] = -state->tmp1.ptr.p_double[i]; } /* * Compute D */ for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = s->effc.ptr.p_double[state->basis.idx.ptr.p_int[i]]; } reviseddualsimplex_basissolvet(&state->basis, &state->tmp0, &state->tmp1, &state->tmp2, _state); reviseddualsimplex_computeantv(state, &state->tmp1, &s->d, _state); for(i=0; i<=nn-1; i++) { j = state->basis.nidx.ptr.p_int[i]; s->d.ptr.p_double[j] = s->effc.ptr.p_double[j]-s->d.ptr.p_double[j]; } /* * Update state validity/age */ s->state = reviseddualsimplex_ssvalid; } /************************************************************************* This function performs initial dual feasibility correction on the subproblem. It assumes that problem state is at least ssValidXN. After call to this function the problem state is set to ssValid. This function returns dual feasibility error after dual feasibility correction. NOTE: this function expects that both nonbasic and basic components are stored in XA[]. XB[] array is not referenced. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static double reviseddualsimplex_initialdualfeasibilitycorrection(dualsimplexstate* state, dualsimplexsubproblem* s, dualsimplexsettings* settings, ae_state *_state) { ae_frame _frame_block; ae_vector dummy; ae_int_t nn; ae_int_t m; ae_int_t ii; ae_int_t i; ae_int_t j; ae_bool flipped; double v; double dj; double xj; ae_int_t bndt; double result; ae_frame_make(_state, &_frame_block); memset(&dummy, 0, sizeof(dummy)); ae_vector_init(&dummy, 0, DT_REAL, _state, ae_true); nn = s->ns; m = s->m; ae_assert(s->state>=reviseddualsimplex_ssvalidxn, "InitialDualFeasibilityCorrection: XN is invalid", _state); /* * Prepare */ rvectorsetlengthatleast(&state->dfctmp0, m, _state); rvectorsetlengthatleast(&state->dfctmp1, m, _state); /* * Recompute D[] using fresh factorization */ reviseddualsimplex_basisfreshtrf(&state->basis, &state->at, settings, _state); for(i=0; i<=m-1; i++) { state->dfctmp0.ptr.p_double[i] = s->effc.ptr.p_double[state->basis.idx.ptr.p_int[i]]; } reviseddualsimplex_basissolvet(&state->basis, &state->dfctmp0, &state->dfctmp1, &state->dfctmp2, _state); reviseddualsimplex_computeantv(state, &state->dfctmp1, &s->d, _state); for(i=0; i<=nn-1; i++) { j = state->basis.nidx.ptr.p_int[i]; s->d.ptr.p_double[j] = s->effc.ptr.p_double[j]-s->d.ptr.p_double[j]; } /* * Perform flips for dual-infeasible boxed variables */ result = (double)(0); flipped = ae_false; for(ii=0; ii<=nn-1; ii++) { j = state->basis.nidx.ptr.p_int[ii]; bndt = s->bndt.ptr.p_int[j]; /* * Boxed variables, perform DFC */ if( bndt==reviseddualsimplex_ccrange ) { dj = s->d.ptr.p_double[j]; xj = s->xa.ptr.p_double[j]; if( xj==s->bndl.ptr.p_double[j]&&dj<0 ) { s->xa.ptr.p_double[j] = 
s->bndu.ptr.p_double[j]; flipped = ae_true; continue; } if( xj==s->bndu.ptr.p_double[j]&&dj>0 ) { s->xa.ptr.p_double[j] = s->bndl.ptr.p_double[j]; flipped = ae_true; continue; } continue; } /* * Non-boxed variables, compute dual feasibility error */ if( bndt==reviseddualsimplex_ccfixed ) { continue; } if( bndt==reviseddualsimplex_cclower ) { v = -s->d.ptr.p_double[j]; if( v>result ) { result = v; } continue; } if( bndt==reviseddualsimplex_ccupper ) { v = s->d.ptr.p_double[j]; if( v>result ) { result = v; } continue; } if( bndt==reviseddualsimplex_ccfree ) { result = ae_maxreal(result, ae_fabs(s->d.ptr.p_double[j], _state), _state); continue; } } /* * Recompute basic components of X[] */ if( flipped||s->state<reviseddualsimplex_ssvalid ) { reviseddualsimplex_computeanxn(state, s, &s->xa, &state->dfctmp0, _state); reviseddualsimplex_basissolve(&state->basis, &state->dfctmp0, &state->dfctmp1, &state->dfctmp2, _state); for(i=0; i<=m-1; i++) { s->xa.ptr.p_double[state->basis.idx.ptr.p_int[i]] = -state->dfctmp1.ptr.p_double[i]; } } /* * Update state validity/age */ s->state = reviseddualsimplex_ssvalid; ae_frame_leave(_state); return result; } /************************************************************************* This function performs shifting using the algorithm specified by Settings.Shifting. It accepts following parameters: * AlphaR - pivot row * Delta - delta from pricing step * Q - variable selected by ratio test * ThetaD - dual step length If no shifts are necessary, it silently returns. If shifts are necessary, it modifies ThetaD, S.D, S.EffC according to the shifting algorithm. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_shifting(dualsimplexstate* state, dualsimplexsubproblem* s, /* Real */ ae_vector* alphar, double delta, ae_int_t q, double* thetad, dualsimplexsettings* settings, ae_state *_state) { ae_int_t dir; double sft; ae_int_t ii; ae_int_t j; ae_int_t bndt; /* * No shifts */ if( settings->shifting==0 ) { return; } /* * EXPAND with ThetaD=0 */ if( settings->shifting==1 ) { dir = ae_sign(delta, _state); if( ae_fp_greater_eq(*thetad*dir,(double)(0)) ) { return; } s->effc.ptr.p_double[q] = s->effc.ptr.p_double[q]-s->d.ptr.p_double[q]; s->d.ptr.p_double[q] = (double)(0); *thetad = (double)(0); return; } /* * EXPAND with ThetaD=ShiftLen */ if( settings->shifting==2 ) { dir = ae_sign(delta, _state); if( ae_fp_greater(*thetad*dir,(double)(0)) ) { return; } /* * Ensure that non-zero step is performed */ *thetad = dir*reviseddualsimplex_shiftlen; /* * Shift Q-th coefficient */ sft = *thetad*(dir*alphar->ptr.p_double[q])-s->d.ptr.p_double[q]; s->effc.ptr.p_double[q] = s->effc.ptr.p_double[q]+sft; s->d.ptr.p_double[q] = s->d.ptr.p_double[q]+sft; /* * Shift other coefficients */ for(ii=0; ii<=s->ns-1; ii++) { j = state->basis.nidx.ptr.p_int[ii]; bndt = s->bndt.ptr.p_int[j]; if( (j==q||bndt==reviseddualsimplex_ccfixed)||bndt==reviseddualsimplex_ccfree ) { continue; } sft = *thetad*(dir*alphar->ptr.p_double[j])-s->d.ptr.p_double[j]; /* * Handle variables at lower bound */ if( bndt==reviseddualsimplex_cclower||(bndt==reviseddualsimplex_ccrange&&s->xa.ptr.p_double[j]==s->bndl.ptr.p_double[j]) ) { sft = sft-reviseddualsimplex_dtol; if( sft>0 ) { s->effc.ptr.p_double[j] = s->effc.ptr.p_double[j]+sft; s->d.ptr.p_double[j] = s->d.ptr.p_double[j]+sft; } continue; } if( bndt==reviseddualsimplex_ccupper||(bndt==reviseddualsimplex_ccrange&&s->xa.ptr.p_double[j]==s->bndu.ptr.p_double[j]) ) { sft = sft+reviseddualsimplex_dtol; if( sft<0 ) { 
s->effc.ptr.p_double[j] = s->effc.ptr.p_double[j]+sft; s->d.ptr.p_double[j] = s->d.ptr.p_double[j]+sft; } continue; } } /* * Done */ return; } ae_assert(ae_false, "Shifting: unexpected shifting type", _state); } /************************************************************************* This function performs pricing step Additional parameters: * Phase1Pricing - if True, then special Phase #1 restriction is applied to leaving variables: only those are eligible which will move to zero bound after basis change. This trick allows to accelerate and stabilize phase #1. See Robert Fourer, 'Notes on the dual simplex method', draft report, 1994, for more info. Returns: * leaving variable index P * its index R in the basis, in [0,M) range * Delta - difference between variable value and corresponding bound NOTE: this function expects that basic components are stored in XB[]; corresponding entries of XA[] are ignored. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_pricingstep(dualsimplexstate* state, dualsimplexsubproblem* s, ae_bool phase1pricing, ae_int_t* p, ae_int_t* r, double* delta, dualsimplexsettings* settings, ae_state *_state) { ae_int_t m; ae_int_t i; ae_int_t bi; double v; double vtarget; double xbi; double bndl; double bndu; double vdiff; double vtest; double invw; double tol; ae_int_t bndt; ae_bool hasboth; ae_bool hasl; ae_bool hasu; *p = 0; *r = 0; *delta = 0; m = s->m; /* * Integrity checks */ ae_assert(s->state==reviseddualsimplex_ssvalid, "PricingStep: invalid X", _state); ae_assert(m>0, "PricingStep: M<=0", _state); /* * Pricing */ if( settings->pricing==0 ) { /* * "Most infeasible" pricing * * NOTE: VTarget is initialized by XTol because we want to skip * violations below XTol */ *p = -1; *r = -1; *delta = (double)(0); vtarget = reviseddualsimplex_xtol; for(i=0; i<=m-1; i++) { bndt = s->bndtb.ptr.p_int[i]; hasboth = bndt==3||bndt==0; hasl = hasboth||bndt==1; hasu = hasboth||bndt==2; xbi = s->xb.ptr.p_double[i]; if( hasl ) { bndl = s->bndlb.ptr.p_double[i]; vdiff = xbi-bndl; v = -vdiff; if( v>vtarget ) { /* * Special phase 1 pricing: do not choose variables which move to non-zero bound */ if( phase1pricing&&!(bndl==0.0) ) { continue; } /* * Proceed as usual */ *p = state->basis.idx.ptr.p_int[i]; *r = i; *delta = vdiff; vtarget = v; continue; } } if( hasu ) { bndu = s->bndub.ptr.p_double[i]; vdiff = xbi-bndu; v = vdiff; if( v>vtarget ) { /* * Special phase 1 pricing: do not choose variables which move to non-zero bound */ if( phase1pricing&&!(bndu==0.0) ) { continue; } /* * Proceed as usual */ *p = state->basis.idx.ptr.p_int[i]; *r = i; *delta = vdiff; vtarget = v; continue; } } } return; } if( settings->pricing==-1||settings->pricing==1 ) { /* * Dual steepest edge pricing */ reviseddualsimplex_basisrequestweights(&state->basis, settings, _state); *p = -1; *r = -1; *delta = (double)(0); vtarget = (double)(0); for(i=0; i<=m-1; i++) { bi = state->basis.idx.ptr.p_int[i]; bndt = s->bndtb.ptr.p_int[i]; hasboth = bndt==3||bndt==0; hasl = hasboth||bndt==1; hasu = hasboth||bndt==2; xbi = s->xb.ptr.p_double[i]; tol = reviseddualsimplex_xtol; invw = 1/state->basis.dseweights.ptr.p_double[i]; if( hasl ) { bndl = s->bndlb.ptr.p_double[i]; vdiff = xbi-bndl; vtest = vdiff*vdiff*invw; if( vdiff<-tol&&(*p<0||vtest>vtarget) ) { /* * Special phase 1 pricing: do not choose variables which move to non-zero bound */ if( phase1pricing&&!(bndl==0.0) ) { continue; } /* * Proceed as usual */ *p = 
bi; *r = i; *delta = vdiff; vtarget = vtest; continue; } } if( hasu ) { bndu = s->bndub.ptr.p_double[i]; vdiff = xbi-bndu; vtest = vdiff*vdiff*invw; if( vdiff>tol&&(*p<0||vtest>vtarget) ) { /* * Special phase 1 pricing: do not choose variables which move to non-zero bound */ if( phase1pricing&&!(bndu==0.0) ) { continue; } /* * Proceed as usual */ *p = bi; *r = i; *delta = vdiff; vtarget = vtest; continue; } } } return; } ae_assert(ae_false, "PricingStep: unknown pricing type", _state); } /************************************************************************* This function performs ratio test, either simple one or BFRT. It accepts following parameters: * AlphaR - pivot row * Delta - delta from pricing step * P - index of leaving variable from pricing step It returns following results: * Q - non-negative value for success, negative for primal infeasible problem * ThetaD - dual step length * PossibleFlips[PossibleFlipsCnt] - for possible flip indexes (for BFRT this set coincides with actual flips, but stabilizing BFRT is a bit more complex - some variables in PossibleFlips[] may need flipping and some not) Internally it uses following fields of State for temporaries: * EligibleSet -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_ratiotest(dualsimplexstate* state, dualsimplexsubproblem* s, /* Real */ ae_vector* alphar, double delta, ae_int_t p, ae_int_t* q, double* thetad, /* Integer */ ae_vector* possibleflips, ae_int_t* possibleflipscnt, dualsimplexsettings* settings, ae_state *_state) { ae_int_t nx; ae_int_t nn; ae_int_t i; ae_int_t j; ae_int_t nj; ae_int_t jj; ae_int_t dir; double sinv; double vx; double vp; double vtarget; double vtest; ae_int_t eligiblecnt; ae_int_t bndt; double alphawaver; double adelta; ae_int_t idx; double vtheta; double thetamax; ae_int_t harrissetsize; ae_bool hasnonboxedvars; *q = 0; *thetad = 0; nx = s->ns+s->m; nn = s->ns; ae_assert(ae_fp_neq(delta,(double)(0)), "RatioTest: zero delta", _state); ae_assert(s->state==reviseddualsimplex_ssvalid, "RatioTest: invalid X", _state); /* * Prepare temporaries * * Scaled tolerances are used to test AlphaWaveR for positivity/negativity, * scale of I-th tolerance is calculated as ratio of ColScale[I] and ColScale[P]. 
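* For example, a nonbasic variable J currently sitting at its lower bound enters the eligible set only
* when DIR*AlphaR[J] > PivotTol*ColScale[J]/ColScale[P]; this is exactly the test performed below with
* VP = PivotTol*(ColScale[J]*SInv) and SInv = 1/ColScale[P].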
*/ dir = ae_sign(delta, _state); sinv = 1.0/s->colscales.ptr.p_double[p]; ivectorsetlengthatleast(possibleflips, nx, _state); /* * Prepare set of eligible variables * * NOTE: free variables are immediately chosen at this stage */ ivectorsetlengthatleast(&state->eligibleset, nn, _state); eligiblecnt = 0; for(j=0; j<=nn-1; j++) { nj = state->basis.nidx.ptr.p_int[j]; bndt = s->bndt.ptr.p_int[nj]; /* * Handle fixed and free variables: fixed ones are not eligible, * free non-basic variables are always and immediately eligible */ if( bndt==reviseddualsimplex_ccfixed ) { continue; } if( bndt==reviseddualsimplex_ccfree ) { *q = nj; *thetad = (double)(0); return; } /* * Handle lower/upper/range constraints */ vx = s->xa.ptr.p_double[nj]; vp = settings->pivottol*(s->colscales.ptr.p_double[nj]*sinv); alphawaver = dir*alphar->ptr.p_double[nj]; if( bndt==reviseddualsimplex_cclower||(bndt==reviseddualsimplex_ccrange&&vx==s->bndl.ptr.p_double[nj]) ) { if( alphawaver>vp ) { state->eligibleset.ptr.p_int[eligiblecnt] = nj; eligiblecnt = eligiblecnt+1; continue; } } if( bndt==reviseddualsimplex_ccupper||(bndt==reviseddualsimplex_ccrange&&vx==s->bndu.ptr.p_double[nj]) ) { if( alphawaver<-vp ) { state->eligibleset.ptr.p_int[eligiblecnt] = nj; eligiblecnt = eligiblecnt+1; continue; } } } /* * Simple ratio test. */ if( settings->ratiotest==0 ) { *q = -1; *thetad = (double)(0); vtarget = (double)(0); *possibleflipscnt = 0; for(j=0; j<=eligiblecnt-1; j++) { nj = state->eligibleset.ptr.p_int[j]; /* * More general case */ alphawaver = dir*alphar->ptr.p_double[nj]; vtest = s->d.ptr.p_double[nj]/alphawaver; if( *q<0||vtest<vtarget ) { *q = nj; vtarget = vtest; *thetad = s->d.ptr.p_double[nj]/alphar->ptr.p_double[nj]; } } reviseddualsimplex_shifting(state, s, alphar, delta, *q, thetad, settings, _state); return; } /* * Bounds flipping ratio test */ if( settings->ratiotest==1 ) { *q = -1; *thetad = (double)(0); *possibleflipscnt = 0; adelta = ae_fabs(delta, _state); /* * Quick exit */ if( eligiblecnt==0 ) { return; } /* * BFRT */ while(eligiblecnt>0) { /* * Find Q satisfying BFRT criteria */ idx = -1; *q = -1; vtarget = (double)(0); for(j=0; j<=eligiblecnt-1; j++) { nj = state->eligibleset.ptr.p_int[j]; vtheta = s->d.ptr.p_double[nj]/alphar->ptr.p_double[nj]; vtest = dir*vtheta; if( *q<0||vtest<vtarget ) { idx = j; *q = nj; vtarget = vtest; } } ae_assert(idx>=0, "RatioTest: integrity check failed (BFRT)", _state); /* * BFRT mini-iterations will be terminated upon discovery * of a non-boxed variable or upon exhaustion of the eligible set. */ if( s->bndt.ptr.p_int[*q]!=reviseddualsimplex_ccrange ) { break; } if( eligiblecnt==1 ) { break; } /* * Update and test ADelta. Break BFRT mini-iterations once * we get negative slope. 
*/ adelta = adelta-(s->bndu.ptr.p_double[*q]-s->bndl.ptr.p_double[*q])*ae_fabs(alphar->ptr.p_double[*q], _state); if( ae_fp_less_eq(adelta,(double)(0)) ) { break; } /* * Update eligible set, record flip */ ae_assert(state->eligibleset.ptr.p_int[idx]==(*q), "RatioTest: unexpected failure", _state); possibleflips->ptr.p_int[*possibleflipscnt] = *q; *possibleflipscnt = *possibleflipscnt+1; state->eligibleset.ptr.p_int[idx] = state->eligibleset.ptr.p_int[eligiblecnt-1]; eligiblecnt = eligiblecnt-1; } ae_assert(*q>=0, "RatioTest: unexpected failure", _state); *thetad = s->d.ptr.p_double[*q]/alphar->ptr.p_double[*q]; reviseddualsimplex_shifting(state, s, alphar, delta, *q, thetad, settings, _state); return; } /* * Stabilizing bounds flipping ratio test */ if( settings->ratiotest==2 ) { *q = -1; *thetad = (double)(0); *possibleflipscnt = 0; adelta = ae_fabs(delta, _state); ivectorgrowto(&state->harrisset, eligiblecnt, _state); /* * Quick exit */ if( eligiblecnt==0 ) { return; } /* * BFRT */ while(eligiblecnt>0) { /* * Determine ThetaMax according to stabilizing BFRT */ thetamax = ae_maxrealnumber; for(j=0; j<=eligiblecnt-1; j++) { nj = state->eligibleset.ptr.p_int[j]; alphawaver = dir*alphar->ptr.p_double[nj]; if( alphawaver>0 ) { vtest = (s->d.ptr.p_double[nj]+reviseddualsimplex_dtol)/alphawaver; } else { vtest = (s->d.ptr.p_double[nj]-reviseddualsimplex_dtol)/alphawaver; } if( vtest<thetamax ) { thetamax = vtest; } } /* * Determine Harris set (eligible entries which satisfy the stabilizing criteria for step ThetaMax) * and select Q as the entry with the largest pivot magnitude */ harrissetsize = 0; hasnonboxedvars = ae_false; *q = -1; vtarget = (double)(0); for(j=0; j<=eligiblecnt-1; j++) { nj = state->eligibleset.ptr.p_int[j]; alphawaver = dir*alphar->ptr.p_double[nj]; if( s->d.ptr.p_double[nj]/alphawaver>thetamax ) { continue; } state->harrisset.ptr.p_int[harrissetsize] = nj; harrissetsize = harrissetsize+1; hasnonboxedvars = hasnonboxedvars||s->bndt.ptr.p_int[nj]!=reviseddualsimplex_ccrange; vtest = ae_fabs(alphawaver, _state); if( vtest>vtarget ) { *q = nj; vtarget = vtest; } } ae_assert(*q>=0, "RatioTest: integrity check failed (Harris set selection)", _state); if( harrissetsize==eligiblecnt ) { break; } /* * Remove Harris set from the eligible set */ j = 0; jj = 0; for(i=0; i<=eligiblecnt-1; i++) { if( j==harrissetsize||state->eligibleset.ptr.p_int[i]!=state->harrisset.ptr.p_int[j] ) { /* * I-th element not present in Harris set, leave it in the eligible set */ state->eligibleset.ptr.p_int[jj] = state->eligibleset.ptr.p_int[i]; jj = jj+1; } else { /* * I-th element is present in Harris set, skip it */ j = j+1; } } eligiblecnt = eligiblecnt-harrissetsize; ae_assert(j==harrissetsize, "RatioTest: integrity check failed", _state); ae_assert(jj==eligiblecnt, "RatioTest: integrity check failed", _state); /* * Update and test |delta|. * * Break BFRT mini-iterations once we get negative slope. 
*/ for(j=0; j<=harrissetsize-1; j++) { nj = state->harrisset.ptr.p_int[j]; if( !hasnonboxedvars||s->bndt.ptr.p_int[nj]==reviseddualsimplex_ccrange ) { adelta = adelta-(s->bndu.ptr.p_double[nj]-s->bndl.ptr.p_double[nj])*ae_fabs(alphar->ptr.p_double[nj], _state); } else { adelta = (double)(-1); } } if( ae_fp_less_eq(adelta,(double)(0)) ) { break; } ae_assert(!hasnonboxedvars, "RatioTest: integrity check failed", _state); for(j=0; j<=harrissetsize-1; j++) { possibleflips->ptr.p_int[*possibleflipscnt] = state->harrisset.ptr.p_int[j]; *possibleflipscnt = *possibleflipscnt+1; } } ae_assert(*q>=0, "RatioTest: unexpected failure", _state); if( eligiblecnt==0&&adelta<0 ) { /* * Eligible set exhausted, declare dual unboundedness */ *q = -1; *thetad = (double)(0); return; } *thetad = s->d.ptr.p_double[*q]/alphar->ptr.p_double[*q]; reviseddualsimplex_shifting(state, s, alphar, delta, *q, thetad, settings, _state); return; } /* * Unknown test type */ ae_assert(ae_false, "RatioTest: integrity check failed, unknown test type", _state); } /************************************************************************* This function performs update of XB, XN, D and Z during final step of revised dual simplex method. It also updates basis cache of the subproblem (s.bcache field). Depending on Settings.RatioTest, following operations are performed: * Settings.RatioTest=0 -> simple update is performed * Settings.RatioTest=1 -> bounds flipping ratio test update is performed * Settings.RatioTest=2 -> stabilizing bounds flipping ratio test update is performed It accepts following parameters: * P - index of leaving variable from pricing step * Q - index of entering variable. * R - index of leaving variable in AlphaQ * Delta - delta from pricing step * AlphaPiv - pivot element (in absence of numerical rounding it is AlphaR[Q]=AlphaQ[R]) * ThetaP - primal step length * ThetaD - dual step length * AlphaQ - pivot column * AlphaQim - intermediate result from Ftran for AlphaQ, used for Forest-Tomlin update, not referenced when other update scheme is set * AlphaR - pivot row * Tau - tau-vector for DSE pricing (ignored if simple pricing is used) * PossibleFlips, PossibleFlipsCnt - outputs of the RatioTest(), information about possible variable flips (however, we have to check residual costs before actually flipping variables - it is possible that some variables in this set actually do not need flipping) It performs following operations: * basis update * update of XB/BndTB/BndLB/BndUB[] and XA[] (basic and nonbasic components), D * update of pricing weights -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_updatestep(dualsimplexstate* state, dualsimplexsubproblem* s, ae_int_t p, ae_int_t q, ae_int_t r, double delta, double alphapiv, double thetap, double thetad, /* Real */ ae_vector* alphaq, /* Real */ ae_vector* alphaqim, /* Real */ ae_vector* alphar, /* Real */ ae_vector* tau, /* Integer */ ae_vector* possibleflips, ae_int_t possibleflipscnt, dualsimplexsettings* settings, ae_state *_state) { ae_int_t nn; ae_int_t nx; ae_int_t m; ae_int_t ii; ae_int_t j; ae_int_t k; ae_int_t k0; ae_int_t k1; double bndl; double bndu; ae_bool flipped; double flip; double dj; ae_int_t dir; ae_int_t idx; ae_int_t actualflipscnt; nn = s->ns; nx = s->ns+s->m; m = s->m; /* * Integrity checks */ ae_assert((settings->ratiotest==0||settings->ratiotest==1)||settings->ratiotest==2, "UpdateStep: invalid X", _state); 
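/*
 * NOTE: the BFRT branch below applies the flips suggested by the ratio test: for every variable J
 * whose reduced cost changed sign it moves XA[J] to the opposite bound, reduces Delta by
 * DIR*(BndU[J]-BndL[J])*|AlphaR[J]| and accumulates Flip*A[:,J] in Tmp0, so that the final primal
 * step ThetaP=Delta/AlphaPiv and the correction of the basic components XB account for all flips.
 */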
ae_assert(s->state==reviseddualsimplex_ssvalid, "UpdateStep: invalid X", _state); ae_assert(p>=0&&q>=0, "UpdateStep: invalid P/Q", _state); ae_assert(ae_fp_neq(delta,(double)(0)), "UpdateStep: Delta=0", _state); ae_assert(ae_fp_neq(alphapiv,(double)(0)), "UpdateStep: AlphaPiv=0", _state); /* * Prepare */ dir = ae_sign(delta, _state); flip = (double)(0); rvectorsetlengthatleast(&state->tmp0, m, _state); for(k=0; k<=m-1; k++) { state->tmp0.ptr.p_double[k] = (double)(0); } ivectorsetlengthatleast(&state->ustmpi, nx, _state); actualflipscnt = 0; /* * Evaluate and update non-basic elements of D */ for(ii=0; ii<=nn-1; ii++) { j = state->basis.nidx.ptr.p_int[ii]; s->d.ptr.p_double[j] = s->d.ptr.p_double[j]-thetad*state->alphar.ptr.p_double[j]; } for(ii=0; ii<=possibleflipscnt-1; ii++) { j = possibleflips->ptr.p_int[ii]; dj = s->d.ptr.p_double[j]; bndl = s->bndl.ptr.p_double[j]; bndu = s->bndu.ptr.p_double[j]; flipped = ae_false; if( s->xa.ptr.p_double[j]==bndl&&dj<0 ) { flip = bndu-bndl; flipped = ae_true; } else { if( s->xa.ptr.p_double[j]==bndu&&dj>0 ) { flip = bndl-bndu; flipped = ae_true; } } if( flipped ) { delta = delta-dir*(bndu-bndl)*ae_fabs(alphar->ptr.p_double[j], _state); state->ustmpi.ptr.p_int[actualflipscnt] = j; actualflipscnt = actualflipscnt+1; k0 = state->at.ridx.ptr.p_int[j]; k1 = state->at.ridx.ptr.p_int[j+1]-1; for(k=k0; k<=k1; k++) { idx = state->at.idx.ptr.p_int[k]; state->tmp0.ptr.p_double[idx] = state->tmp0.ptr.p_double[idx]+flip*state->at.vals.ptr.p_double[k]; } } } s->d.ptr.p_double[p] = -thetad; s->d.ptr.p_double[q] = 0.0; /* * Apply BFRT update (aka long dual step) or simple ratio update */ if( actualflipscnt>0 ) { thetap = delta/alphapiv; k0 = state->at.ridx.ptr.p_int[q]; k1 = state->at.ridx.ptr.p_int[q+1]-1; for(k=k0; k<=k1; k++) { idx = state->at.idx.ptr.p_int[k]; state->tmp0.ptr.p_double[idx] = state->tmp0.ptr.p_double[idx]+thetap*state->at.vals.ptr.p_double[k]; } reviseddualsimplex_basissolve(&state->basis, &state->tmp0, &state->tmp1, &state->tmp2, _state); for(j=0; j<=m-1; j++) { s->xb.ptr.p_double[j] = s->xb.ptr.p_double[j]-state->tmp1.ptr.p_double[j]; } for(ii=0; ii<=actualflipscnt-1; ii++) { j = state->ustmpi.ptr.p_int[ii]; if( s->xa.ptr.p_double[j]==s->bndl.ptr.p_double[j] ) { s->xa.ptr.p_double[j] = s->bndu.ptr.p_double[j]; } else { s->xa.ptr.p_double[j] = s->bndl.ptr.p_double[j]; } } s->xb.ptr.p_double[r] = s->xa.ptr.p_double[q]+thetap; if( dir<0 ) { s->xa.ptr.p_double[p] = s->bndl.ptr.p_double[p]; } else { s->xa.ptr.p_double[p] = s->bndu.ptr.p_double[p]; } } else { for(j=0; j<=m-1; j++) { s->xb.ptr.p_double[j] = s->xb.ptr.p_double[j]-thetap*state->alphaq.ptr.p_double[j]; } s->xb.ptr.p_double[r] = s->xa.ptr.p_double[q]+thetap; if( dir<0 ) { s->xa.ptr.p_double[p] = s->bndl.ptr.p_double[p]; } else { s->xa.ptr.p_double[p] = s->bndu.ptr.p_double[p]; } } /* * Update basis */ reviseddualsimplex_basisupdatetrf(&state->basis, &state->at, p, q, &state->alphaq, &state->alphaqim, r, tau, settings, _state); /* * Update cached variables */ s->bndlb.ptr.p_double[r] = s->bndl.ptr.p_double[q]; s->bndub.ptr.p_double[r] = s->bndu.ptr.p_double[q]; s->bndtb.ptr.p_int[r] = s->bndt.ptr.p_int[q]; } /************************************************************************* This function performs several checks for accumulation of errors during factorization update. It returns True if refactorization is advised. 
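NOTE: the check exploits the fact that, in exact arithmetic, the R-th entry of the pivot column AlphaQ (computed by FTran) coincides with the Q-th entry of the pivot row AlphaR (computed by BTran). Refactorization is advised only when the factorization is older than SafeTrfAge and |AlphaQ[R]-AlphaR[Q]| exceeds either AlphaTrigger*(1+max|AlphaQ|) or AlphaTrigger2*|AlphaR[Q]|.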
-- ALGLIB -- Copyright 24.01.2019 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_refactorizationrequired(dualsimplexstate* state, dualsimplexsubproblem* s, ae_int_t q, ae_int_t r, ae_state *_state) { ae_int_t m; ae_int_t i; double mx; double v; ae_bool result; m = s->m; result = ae_false; /* * Quick exit */ if( state->basis.trfage<=reviseddualsimplex_safetrfage ) { return result; } /* * Compare Q-th entry of the pivot row AlphaR with R-th entry of the AlphaQ; * ideally, both should match exactly. The difference is a rough estimate * of the magnitude of the numerical errors. */ mx = 0.0; for(i=0; i<=m-1; i++) { v = state->alphaq.ptr.p_double[i]; v = v*v; if( v>mx ) { mx = v; } } mx = ae_sqrt(mx, _state); result = result||ae_fp_greater(ae_fabs(state->alphaq.ptr.p_double[r]-state->alphar.ptr.p_double[q], _state),reviseddualsimplex_alphatrigger*(1.0+mx)); result = result||ae_fp_greater(ae_fabs(state->alphaq.ptr.p_double[r]-state->alphar.ptr.p_double[q], _state),reviseddualsimplex_alphatrigger2*ae_fabs(state->alphar.ptr.p_double[q], _state)); return result; } /************************************************************************* This function performs actual solution of dual simplex subproblem (either primary one or phase 1 one). A problem with following properties is expected: * M>0 * feasible box constraints * dual feasible initial basis * actual initial point XC and target value Z * actual reduced cost vector D * pricing weights being set to 1.0 or copied from previous problem Returns: * Info = +1 for success, -3 for infeasible * IterationsCount is increased by amount of iterations performed NOTE: this function internally uses separate storage of basic and nonbasic components; however, all inputs and outputs use single array S.XA[] to store both basic and nonbasic variables. It transparently splits variables on input and recombines them on output. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_solvesubproblemdual(dualsimplexstate* state, dualsimplexsubproblem* s, ae_bool isphase1, dualsimplexsettings* settings, ae_int_t* info, ae_state *_state) { ae_int_t nx; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t p; ae_int_t r; ae_int_t q; double alphapiv; double thetad; double thetap; double delta; ae_int_t forcedrestarts; ae_int_t j0; ae_int_t j1; *info = 0; nx = s->ns+s->m; m = s->m; forcedrestarts = 0; /* * Integrity checks */ ae_assert(s->state==reviseddualsimplex_ssvalid, "SolveSubproblemDual: X is not valid", _state); ae_assert(m>0, "SolveSubproblemDual: M<=0", _state); for(i=0; i<=nx-1; i++) { ae_assert(s->bndt.ptr.p_int[i]!=reviseddualsimplex_ccinfeasible, "SolveSubproblemDual: infeasible box constraints", _state); } ae_assert(reviseddualsimplex_isdualfeasible(state, s, _state), "SolveSubproblemDual: dual infeasible initial basis", _state); /* * Actual processing */ reviseddualsimplex_offloadbasiccomponents(s, &state->basis, _state); *info = 0; rvectorsetlengthatleast(&state->tmp0, m, _state); for(;;) { /* * Pricing */ reviseddualsimplex_pricingstep(state, s, isphase1, &p, &r, &delta, settings, _state); if( ae_fp_eq(delta,(double)(0)) ) { /* * Solved! Feasible and bounded! 
*/ reviseddualsimplex_recombinebasicnonbasicx(s, &state->basis, _state); *info = 1; return; } /* * BTran */ for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = (double)(0); } state->tmp0.ptr.p_double[r] = (double)(1); reviseddualsimplex_basissolvet(&state->basis, &state->tmp0, &state->rhor, &state->tmp2, _state); /* * Pivot row */ reviseddualsimplex_computeantv(state, &state->rhor, &state->alphar, _state); /* * Ratio test */ reviseddualsimplex_ratiotest(state, s, &state->alphar, delta, p, &q, &thetad, &state->possibleflips, &state->possibleflipscnt, settings, _state); if( q<0 ) { /* * Do we have fresh factorization and state? If not, * refresh them prior to declaring that we have no solution. */ if( state->basis.trfage>0&&forcedrestarts<reviseddualsimplex_maxforcedrestarts ) { reviseddualsimplex_basisfreshtrf(&state->basis, &state->at, settings, _state); reviseddualsimplex_subproblemhandlexnupdate(state, s, _state); reviseddualsimplex_offloadbasiccomponents(s, &state->basis, _state); inc(&forcedrestarts, _state); continue; } /* * Dual unbounded, primal infeasible */ reviseddualsimplex_recombinebasicnonbasicx(s, &state->basis, _state); *info = -3; return; } alphapiv = state->alphar.ptr.p_double[q]; /* * FTran * * NOTE: AlphaQim is filled by intermediate FTran result which is useful * for Forest-Tomlin update scheme. If not Forest-Tomlin update is * used, then it is not set. */ for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = (double)(0); } j0 = state->at.ridx.ptr.p_int[q]; j1 = state->at.ridx.ptr.p_int[q+1]-1; for(j=j0; j<=j1; j++) { state->tmp0.ptr.p_double[state->at.idx.ptr.p_int[j]] = state->at.vals.ptr.p_double[j]; } reviseddualsimplex_basissolvex(&state->basis, &state->tmp0, &state->alphaq, &state->alphaqim, ae_true, &state->tmp2, _state); /* * Check numerical accuracy, trigger refactorization if needed */ if( reviseddualsimplex_refactorizationrequired(state, s, q, r, _state) ) { reviseddualsimplex_basisfreshtrf(&state->basis, &state->at, settings, _state); reviseddualsimplex_subproblemhandlexnupdate(state, s, _state); reviseddualsimplex_offloadbasiccomponents(s, &state->basis, _state); continue; } /* * One more FTRan for DSE weights (if needed) */ ae_assert((settings->pricing==-1||settings->pricing==0)||settings->pricing==1, "SolveSubproblemDual: unexpected Settings.Pricing", _state); if( settings->pricing==1 ) { reviseddualsimplex_basissolve(&state->basis, &state->rhor, &state->tau, &state->tmp2, _state); } /* * Basis change and update */ thetap = delta/alphapiv; reviseddualsimplex_updatestep(state, s, p, q, r, delta, alphapiv, thetap, thetad, &state->alphaq, &state->alphaqim, &state->alphar, &state->tau, &state->possibleflips, state->possibleflipscnt, settings, _state); inc(&state->repiterationscount, _state); if( isphase1 ) { inc(&state->repiterationscount1, _state); } else { inc(&state->repiterationscount2, _state); } } } /************************************************************************* This function solves simplex subproblem using primal simplex method. 
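It is intended as a quick clean-up pass performed after the dual iterations (for example, after cost shifts introduced by the shifting strategy are removed), therefore only a very basic textbook pricing rule and ratio test are implemented.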
A problem with following properties is expected: * M>0 * feasible box constraints * primal feasible initial basis * actual initial point XC and target value Z * actual reduced cost vector D * pricing weights being set to 1.0 or copied from previous problem Returns: * Info = +1 for success, -3 for infeasible * IterationsCount is increased by amount of iterations performed -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_solvesubproblemprimal(dualsimplexstate* state, dualsimplexsubproblem* s, dualsimplexsettings* settings, ae_int_t* info, ae_state *_state) { ae_int_t nn; ae_int_t nx; ae_int_t m; ae_int_t i; ae_int_t j; double v; double vmax; ae_int_t bi; double dj; ae_int_t bndt; ae_int_t q; ae_int_t p; ae_int_t r; ae_int_t dir; double lim; ae_bool haslim; double thetap; double xbnd; double flip; ae_int_t canddir; double candlim; double candflip; ae_int_t j0; ae_int_t j1; double alphawave; double vp; double vb; double vx; double vtest; double vv; *info = 0; nn = s->ns; nx = s->ns+s->m; m = s->m; /* * Integrity checks */ ae_assert(s->state==reviseddualsimplex_ssvalid, "SolveSubproblemPrimal: X is not valid", _state); ae_assert(m>0, "SolveSubproblemPrimal: M<=0", _state); for(i=0; i<=nx-1; i++) { ae_assert(s->bndt.ptr.p_int[i]!=reviseddualsimplex_ccinfeasible, "SolveSubproblemPrimal: infeasible box constraints", _state); } /* * Actual processing */ *info = 1; rvectorsetlengthatleast(&state->tmp0, m, _state); for(;;) { /* * Primal simplex pricing step: we implement the very basic version * of the pricing step because it is expected that primal simplex method * is used just to apply quick correction after removal of the perturbation. */ q = -1; vmax = (double)(0); dir = 0; lim = ae_maxrealnumber; haslim = ae_false; flip = (double)(0); canddir = 0; for(i=0; i<=nn-1; i++) { j = state->basis.nidx.ptr.p_int[i]; dj = s->d.ptr.p_double[j]; bndt = s->bndt.ptr.p_int[j]; if( bndt==reviseddualsimplex_ccfixed ) { continue; } if( bndt==reviseddualsimplex_ccrange ) { v = (double)(0); candlim = s->bndu.ptr.p_double[j]-s->bndl.ptr.p_double[j]; candflip = (double)(0); if( s->xa.ptr.p_double[j]==s->bndl.ptr.p_double[j] ) { v = -dj; canddir = 1; candflip = s->bndu.ptr.p_double[j]; } if( s->xa.ptr.p_double[j]==s->bndu.ptr.p_double[j] ) { v = dj; canddir = -1; candflip = s->bndl.ptr.p_double[j]; } if( v>vmax ) { vmax = v; dir = canddir; lim = candlim; haslim = ae_true; flip = candflip; q = j; } continue; } v = (double)(0); canddir = 0; if( bndt==reviseddualsimplex_cclower ) { v = -dj; canddir = 1; } if( bndt==reviseddualsimplex_ccupper ) { v = dj; canddir = -1; } if( bndt==reviseddualsimplex_ccfree ) { v = ae_fabs(dj, _state); canddir = -ae_sign(dj, _state); } if( v>vmax ) { vmax = v; dir = canddir; lim = ae_maxrealnumber; haslim = ae_false; q = j; } continue; } if( vmax<=reviseddualsimplex_dtol ) { /* * Solved: primal and dual feasible! */ return; } ae_assert(q>=0, "SolveSubproblemPrimal: integrity check failed", _state); /* * FTran and textbook ratio test (again, we expect primal phase to terminate quickly) * * NOTE: AlphaQim is filled by intermediate FTran result which is useful * for Forest-Tomlin update scheme. If not Forest-Tomlin update is * used, then it is not set. 
*/ for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = (double)(0); } j0 = state->at.ridx.ptr.p_int[q]; j1 = state->at.ridx.ptr.p_int[q+1]-1; for(j=j0; j<=j1; j++) { state->tmp0.ptr.p_double[state->at.idx.ptr.p_int[j]] = state->at.vals.ptr.p_double[j]; } reviseddualsimplex_basissolvex(&state->basis, &state->tmp0, &state->alphaq, &state->alphaqim, ae_true, &state->tmp2, _state); vp = settings->pivottol; p = -1; r = -1; thetap = (double)(0); xbnd = (double)(0); for(i=0; i<=m-1; i++) { bi = state->basis.idx.ptr.p_int[i]; alphawave = -dir*state->alphaq.ptr.p_double[i]; vx = s->xa.ptr.p_double[bi]; if( alphawave<-vp&&reviseddualsimplex_hasbndl(s, bi, _state) ) { vb = s->bndl.ptr.p_double[bi]; if( vx<=vb ) { /* * X[Bi] is already out of bounds due to rounding errors, perform shifting */ vb = vx-reviseddualsimplex_shiftlen; s->bndl.ptr.p_double[bi] = vx; } vtest = (vb-vx)/alphawave; if( p<0||vtest<thetap ) { p = bi; r = i; thetap = vtest; xbnd = vb; } continue; } if( alphawave>vp&&reviseddualsimplex_hasbndu(s, bi, _state) ) { vb = s->bndu.ptr.p_double[bi]; if( vx>=vb ) { /* * X[Bi] is already out of bounds due to rounding errors, perform shifting */ vb = vx+reviseddualsimplex_shiftlen; s->bndu.ptr.p_double[bi] = vb; } vtest = (vb-vx)/alphawave; if( p<0||vtest<thetap ) { p = bi; r = i; thetap = vtest; xbnd = vb; } continue; } } if( p>=0&&(!haslim||thetap<lim) ) { /* * One of the basic variables hits its bound before Q flips to the opposite one: * perform basis change, update X, factorization and D */ rvectorsetlengthatleast(&state->tmp0, m, _state); for(i=0; i<=m-1; i++) { bi = state->basis.idx.ptr.p_int[i]; vv = thetap*(dir*state->alphaq.ptr.p_double[i]); s->xa.ptr.p_double[bi] = s->xa.ptr.p_double[bi]-vv; } s->xa.ptr.p_double[p] = xbnd; s->xa.ptr.p_double[q] = s->xa.ptr.p_double[q]+dir*thetap; for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = (double)(0); } reviseddualsimplex_basisupdatetrf(&state->basis, &state->at, p, q, &state->alphaq, &state->alphaqim, r, &state->tmp0, settings, _state); for(i=0; i<=m-1; i++) { state->tmp0.ptr.p_double[i] = s->effc.ptr.p_double[state->basis.idx.ptr.p_int[i]]; } reviseddualsimplex_basissolvet(&state->basis, &state->tmp0, &state->tmp1, &state->tmp2, _state); reviseddualsimplex_computeantv(state, &state->tmp1, &s->d, _state); for(i=0; i<=nn-1; i++) { j = state->basis.nidx.ptr.p_int[i]; s->d.ptr.p_double[j] = s->effc.ptr.p_double[j]-s->d.ptr.p_double[j]; } } else { /* * Basis does not change because Qth variable flips from one bound * to another one long before we encounter the boundary */ s->xa.ptr.p_double[q] = flip; for(i=0; i<=m-1; i++) { bi = state->basis.idx.ptr.p_int[i]; vv = lim*(dir*state->alphaq.ptr.p_double[i]); s->xa.ptr.p_double[bi] = s->xa.ptr.p_double[bi]-vv; } } inc(&state->repiterationscount, _state); inc(&state->repiterationscount3, _state); } } /************************************************************************* This function estimates feasibility properties of the current basis and invokes phase 1 if necessary. 
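NOTE: the phase #1 subproblem built by SubproblemInitPhase1() minimizes the sum of dual infeasibilities by temporarily replacing the bounds: lower-bounded variables get the range [0,1], upper-bounded ones get [-1,0], free ones get [-1,1] (started at -1 or +1 depending on the sign of the cost), and all other variables are fixed at zero; the dual feasible basis found for this subproblem is then reused as the initial basis for phase #2.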
A problem with following properties is expected: * M>0 * feasible box constraints * some initial basis (can be dual infeasible) with actual factorization * actual initial point XC and target value Z * actual reduced cost vector D It returns: * +1 if dual feasible basis was found * -4 if problem is dual infeasible -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_invokephase1(dualsimplexstate* state, dualsimplexsettings* settings, ae_state *_state) { ae_int_t m; m = state->primary.m; state->repterminationtype = 0; /* * Integrity checks */ ae_assert(state->primary.state==reviseddualsimplex_ssvalid, "InvokePhase1: invalid primary X", _state); ae_assert(m>0, "InvokePhase1: M<=0", _state); /* * Is it dual feasible from the very beginning (or maybe after initial DFC)? */ if( ae_fp_less_eq(reviseddualsimplex_initialdualfeasibilitycorrection(state, &state->primary, settings, _state),reviseddualsimplex_dtol) ) { state->repterminationtype = 1; return; } /* * Solve phase #1 subproblem */ reviseddualsimplex_subprobleminitphase1(&state->primary, &state->basis, &state->phase1, _state); reviseddualsimplex_initialdualfeasibilitycorrection(state, &state->phase1, settings, _state); reviseddualsimplex_solvesubproblemdual(state, &state->phase1, ae_true, settings, &state->repterminationtype, _state); ae_assert(state->repterminationtype>0, "DualSimplexSolver: unexpected failure of phase #1", _state); state->repterminationtype = 1; /* * Setup initial basis for phase #2 using solution of phase #1 */ reviseddualsimplex_subprobleminferinitialxn(state, &state->primary, _state); if( ae_fp_greater(reviseddualsimplex_initialdualfeasibilitycorrection(state, &state->primary, settings, _state),reviseddualsimplex_dtol) ) { state->repterminationtype = -4; return; } state->repterminationtype = 1; } /************************************************************************* Box-constrained solver; sets State.RepX, State.RepStats and State.RepTerminationType, does not change other fields. 
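NOTE: with M=0 each variable is handled independently: a positive cost component sends the variable to its lower bound, a negative one to its upper bound, a zero cost picks a finite bound (or zero for free variables); a missing required bound results in termination code -4 (unbounded), an infeasible box results in -3.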
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_solveboxonly(dualsimplexstate* state, ae_state *_state) { ae_int_t i; ae_int_t ns; ns = state->primary.ns; ae_assert(state->primary.m==0, "SolveBoxOnly: integrity check failed", _state); for(i=0; i<=ns-1; i++) { /* * Handle infeasible variable */ if( state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccinfeasible ) { state->repterminationtype = -3; reviseddualsimplex_setzeroxystats(state, _state); return; } /* * Handle fixed variable */ if( state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccfixed ) { state->repx.ptr.p_double[i] = state->primary.bndl.ptr.p_double[i]; state->repstats.ptr.p_int[i] = -1; continue; } /* * Handle non-zero cost component */ if( ae_fp_greater(state->primary.rawc.ptr.p_double[i],(double)(0)) ) { if( state->primary.bndt.ptr.p_int[i]!=reviseddualsimplex_ccrange&&state->primary.bndt.ptr.p_int[i]!=reviseddualsimplex_cclower ) { state->repterminationtype = -4; reviseddualsimplex_setzeroxystats(state, _state); return; } state->repx.ptr.p_double[i] = state->primary.bndl.ptr.p_double[i]; state->repstats.ptr.p_int[i] = -1; continue; } if( ae_fp_less(state->primary.rawc.ptr.p_double[i],(double)(0)) ) { if( state->primary.bndt.ptr.p_int[i]!=reviseddualsimplex_ccrange&&state->primary.bndt.ptr.p_int[i]!=reviseddualsimplex_ccupper ) { state->repterminationtype = -4; reviseddualsimplex_setzeroxystats(state, _state); return; } state->repx.ptr.p_double[i] = state->primary.bndu.ptr.p_double[i]; state->repstats.ptr.p_int[i] = 1; continue; } /* * Handle non-free variable with zero cost component */ if( state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccupper||state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccrange ) { state->repx.ptr.p_double[i] = state->primary.bndu.ptr.p_double[i]; state->repstats.ptr.p_int[i] = 1; continue; } if( state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_cclower ) { state->repx.ptr.p_double[i] = state->primary.bndl.ptr.p_double[i]; state->repstats.ptr.p_int[i] = -1; continue; } /* * Free variable, zero cost component */ ae_assert(state->primary.bndt.ptr.p_int[i]==reviseddualsimplex_ccfree, "DSSOptimize: integrity check failed", _state); state->repx.ptr.p_double[i] = (double)(0); state->repstats.ptr.p_int[i] = 0; } } /************************************************************************* Zero-fill RepX, RepY, RepStats. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_setzeroxystats(dualsimplexstate* state, ae_state *_state) { ae_int_t i; for(i=0; i<=state->primary.ns-1; i++) { state->repx.ptr.p_double[i] = (double)(0); } for(i=0; i<=state->primary.m-1; i++) { state->repy.ptr.p_double[i] = (double)(0); state->repdx.ptr.p_double[i] = (double)(0); } for(i=0; i<=state->primary.ns+state->primary.m-1; i++) { state->repstats.ptr.p_int[i] = 0; } } /************************************************************************* This function initializes basis structure; no triangular factorization is prepared yet. Previously allocated memory is reused. 
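NOTE: the basis is initialized to the all-logical one: Idx[i]=NS+i for i=0..M-1 (logical variables are basic), the first NS variables are nonbasic, and all DSE pricing weights are reset to 1.0.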
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basisinit(ae_int_t ns, ae_int_t m, dualsimplexbasis* s, ae_state *_state) { ae_int_t i; s->ns = ns; s->m = m; ivectorgrowto(&s->idx, m, _state); ivectorgrowto(&s->nidx, ns, _state); bvectorgrowto(&s->isbasic, ns+m, _state); for(i=0; i<=ns-1; i++) { s->nidx.ptr.p_int[i] = i; s->isbasic.ptr.p_bool[i] = ae_false; } for(i=0; i<=m-1; i++) { s->idx.ptr.p_int[i] = ns+i; s->isbasic.ptr.p_bool[ns+i] = ae_true; } s->trftype = 3; s->trfage = 0; s->isvalidtrf = ae_false; rvectorsetlengthatleast(&s->dseweights, m, _state); for(i=0; i<=m-1; i++) { s->dseweights.ptr.p_double[i] = 1.0; } s->dsevalid = ae_false; } /************************************************************************* This function resizes basis. It is assumed that constraint matrix is completely overwritten by new one, but both matrices are similar enough so we can reuse previous basis. Dual steepest edge weights are invalidated by this function. This function: * tries to resize basis * if possible, returns True and valid basis with valid factorization * if resize is impossible (or abandoned due to stability reasons), it returns False and basis object is left in the invalid state (you have to reinitialize it by all-logicals basis) Following types of resize are supported: * new basis size is larger than previous one => logical elements are added to the new basis * basis sizes match => no operation is performed * new basis size is zero => basis is set to zero This function: * requires valid triangular factorization at S on entry * replaces it by another, valid factorization * checks that new factorization deviates from the previous one not too much by comparing magnitudes of min[abs(u_ii)] in both factorization (sharp decrease results in attempt to resize being abandoned IMPORTANT: if smooth resize is not possible, this function throws an exception! It is responsibility of the caller to check that smooth resize is possible -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_basistryresize(dualsimplexbasis* s, ae_int_t newm, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state) { ae_int_t ns; ae_int_t oldm; ae_int_t i; double oldminu; double newminu; ae_bool result; ns = s->ns; oldm = s->m; result = ae_false; /* * Quick exit strategies */ if( newm==0 ) { reviseddualsimplex_basisinit(ns, 0, s, _state); result = ae_true; return result; } /* * Same size or larger */ if( newm>=oldm ) { ae_assert(s->isvalidtrf||oldm==0, "BasisTryResize: needs valid TRF in S", _state); /* * Save information about matrix conditioning */ oldminu = reviseddualsimplex_basisminimumdiagonalelement(s, _state); /* * Growth if needed */ s->m = newm; ivectorgrowto(&s->idx, newm, _state); bvectorgrowto(&s->isbasic, ns+newm, _state); for(i=oldm; i<=newm-1; i++) { s->idx.ptr.p_int[i] = ns+i; s->isbasic.ptr.p_bool[ns+i] = ae_true; } /* * DSE weights are invalid and filled by 1.0 */ rvectorgrowto(&s->dseweights, newm, _state); for(i=0; i<=newm-1; i++) { s->dseweights.ptr.p_double[i] = 1.0; } s->dsevalid = ae_false; /* * Invalidate TRF. * Try to refactorize. 
*/ s->isvalidtrf = ae_false; newminu = reviseddualsimplex_basisfreshtrfunsafe(s, at, settings, _state); result = ae_fp_greater_eq(newminu,reviseddualsimplex_maxudecay*oldminu); return result; } ae_assert(ae_false, "BasisTryResize: unexpected branch", _state); return result; } /************************************************************************* This function returns minimum diagonal element of S. Result=1 is returned for M=0. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static double reviseddualsimplex_basisminimumdiagonalelement(dualsimplexbasis* s, ae_state *_state) { double v; double vv; ae_int_t i; ae_int_t m; double result; m = s->m; if( m==0 ) { result = (double)(1); return result; } ae_assert(((s->trftype==0||s->trftype==1)||s->trftype==2)||s->trftype==3, "BasisMinimumDiagonalElement: unexpected TRF type", _state); ae_assert(s->isvalidtrf, "BasisMinimumDiagonalElement: TRF is invalid", _state); v = ae_maxrealnumber; for(i=0; i<=m-1; i++) { vv = (double)(0); if( s->trftype==0||s->trftype==1 ) { vv = s->denselu.ptr.pp_double[i][i]; } if( s->trftype==2||s->trftype==3 ) { vv = sparsegetdiagonal(&s->sparseu, i, _state); } if( vv<0 ) { vv = -vv; } if( vv<v ) { v = vv; } } result = v; return result; } /************************************************************************* This function exports from S0 to S1 the division of variables into basic and nonbasic ones; only the basic/nonbasic sets are exported (the triangular factorization is not copied). It also stores into S1 the min[abs(u_ii)] statistic of the source factorization, which is later used to check stability of the imported basis. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basisexportto(dualsimplexbasis* s0, dualsimplexbasis* s1, ae_state *_state) { s1->ns = s0->ns; s1->m = s0->m; copyintegerarray(&s0->idx, &s1->idx, _state); copyintegerarray(&s0->nidx, &s1->nidx, _state); copybooleanarray(&s0->isbasic, &s1->isbasic, _state); s1->isvalidtrf = ae_false; s1->trftype = -1; s1->dsevalid = ae_false; if( s0->m>0 ) { ae_assert(s0->isvalidtrf, "BasisExport: valid factorization is required for source basis", _state); s1->eminu = reviseddualsimplex_basisminimumdiagonalelement(s0, _state); } else { s1->eminu = (double)(1); } } /************************************************************************* This function imports from S1 to S0 a division of variables into basic/nonbasic ones; only basic/nonbasic sets are imported. Triangular factorization is not imported; however, this function checks that the new factorization does not deviate too much from the previous one by comparing magnitudes of min[abs(u_ii)] in both factorizations (the basis being imported stores statistics about U). A sharp decrease of the diagonal elements means that the situation is too unstable, which results in the import being abandoned. In this case False is returned, and the basis S0 is left in the indeterminate invalid state (you have to reinitialize it with the all-logicals basis). IMPORTANT: if metrics of S0 and S1 do not match, an exception will be generated. 
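NOTE: the import is accepted only when min[abs(u_ii)] of the freshly computed factorization of the imported basis is at least MaxUDecay times the corresponding value stored in S1.EMinU by the export function.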
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_basistryimportfrom(dualsimplexbasis* s0, dualsimplexbasis* s1, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state) { ae_int_t i; double newminu; ae_bool result; ae_assert(s0->ns==s1->ns, "BasisImportFrom: structural variable counts do not match", _state); s0->m = s1->m; for(i=0; i<=s0->m-1; i++) { s0->idx.ptr.p_int[i] = s1->idx.ptr.p_int[i]; } for(i=0; i<=s0->ns-1; i++) { s0->nidx.ptr.p_int[i] = s1->nidx.ptr.p_int[i]; } for(i=0; i<=s0->m+s0->ns-1; i++) { s0->isbasic.ptr.p_bool[i] = s1->isbasic.ptr.p_bool[i]; } s0->isvalidtrf = ae_false; rvectorsetlengthatleast(&s0->dseweights, s1->m, _state); for(i=0; i<=s1->m-1; i++) { s0->dseweights.ptr.p_double[i] = 1.0; } s0->dsevalid = ae_false; newminu = reviseddualsimplex_basisfreshtrfunsafe(s0, at, settings, _state); result = ae_fp_greater_eq(newminu,reviseddualsimplex_maxudecay*s1->eminu); if( !result ) { s0->isvalidtrf = ae_false; s0->trftype = -1; } return result; } /************************************************************************* This function computes fresh triangular factorization. If TRF of age 0 (fresh) is already present, no new factorization is calculated. If factorization has exactly zero element along diagonal, this function generates exception. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basisfreshtrf(dualsimplexbasis* s, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state) { double v; v = reviseddualsimplex_basisfreshtrfunsafe(s, at, settings, _state); ae_assert(ae_fp_greater(v,(double)(0)), "BasisFreshTrf: degeneracy of B is detected", _state); } /************************************************************************* This function computes fresh triangular factorization. If TRF of age 0 (fresh) is already present, no new factorization is calculated. It returns min[abs(u[i,i])] which can be used to determine whether factorization is degenerate or not (it will factorize anything, the question is whether it is possible to use factorization) -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static double reviseddualsimplex_basisfreshtrfunsafe(dualsimplexbasis* s, sparsematrix* at, dualsimplexsettings* settings, ae_state *_state) { ae_int_t m; ae_int_t ns; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t j0; ae_int_t j1; ae_int_t k1; ae_int_t nzl; ae_int_t nzu; ae_int_t nlogical; ae_int_t nstructural; ae_int_t offs; ae_int_t offs1; ae_int_t offs2; double result; m = s->m; ns = s->ns; result = (double)(0); /* * Compare TRF type with one required by settings, invalidation and refresh otherwise */ if( s->trftype!=settings->trftype ) { s->trftype = settings->trftype; s->isvalidtrf = ae_false; result = reviseddualsimplex_basisfreshtrfunsafe(s, at, settings, _state); return result; } /* * Is it valid and fresh? 
*/ if( s->isvalidtrf&&s->trfage==0 ) { result = reviseddualsimplex_basisminimumdiagonalelement(s, _state); return result; } /* * Dense TRF */ if( s->trftype==0||s->trftype==1 ) { ivectorsetlengthatleast(&s->colpermbwd, m, _state); for(i=0; i<=m-1; i++) { s->colpermbwd.ptr.p_int[i] = i; } rmatrixsetlengthatleast(&s->denselu, m, m, _state); for(i=0; i<=m-1; i++) { for(j=0; j<=m-1; j++) { s->denselu.ptr.pp_double[i][j] = (double)(0); } } for(i=0; i<=m-1; i++) { j0 = at->ridx.ptr.p_int[s->idx.ptr.p_int[i]]; j1 = at->ridx.ptr.p_int[s->idx.ptr.p_int[i]+1]-1; for(j=j0; j<=j1; j++) { s->denselu.ptr.pp_double[i][at->idx.ptr.p_int[j]] = at->vals.ptr.p_double[j]; } } rmatrixlu(&s->denselu, m, m, &s->tmpi, _state); reviseddualsimplex_pivottobwd(&s->tmpi, m, &s->rowpermbwd, _state); s->isvalidtrf = ae_true; s->trfage = 0; result = reviseddualsimplex_basisminimumdiagonalelement(s, _state); return result; } /* * Sparse TRF (with either PFI or Forest-Tomlin) */ if( s->trftype==2||s->trftype==3 ) { /* * Determine permutation which moves logical variables * to the beginning. * * NOTE: this reordering results in stable factorization * because we prenormalized constraints with 2-norm, * all elements in the logical columns are less than * 1.0 in magnitude. * * After this block is done we have following arrays: * * tCInvIdx[j], which is an inverse of ColPermBwf[] */ ivectorsetlengthatleast(&s->tcinvidx, m, _state); ivectorsetlengthatleast(&s->rowpermbwd, m, _state); ivectorsetlengthatleast(&s->colpermbwd, m, _state); for(i=0; i<=m-1; i++) { s->tcinvidx.ptr.p_int[i] = i; s->rowpermbwd.ptr.p_int[i] = i; s->colpermbwd.ptr.p_int[i] = i; } nlogical = 0; for(i=0; i<=m-1; i++) { if( s->idx.ptr.p_int[i]>=ns ) { j = s->rowpermbwd.ptr.p_int[nlogical]; s->rowpermbwd.ptr.p_int[nlogical] = s->rowpermbwd.ptr.p_int[i]; s->rowpermbwd.ptr.p_int[i] = j; j1 = s->tcinvidx.ptr.p_int[s->idx.ptr.p_int[i]-ns]; j = s->colpermbwd.ptr.p_int[j1]; s->colpermbwd.ptr.p_int[j1] = s->colpermbwd.ptr.p_int[nlogical]; s->colpermbwd.ptr.p_int[nlogical] = j; s->tcinvidx.ptr.p_int[s->colpermbwd.ptr.p_int[nlogical]] = nlogical; s->tcinvidx.ptr.p_int[s->colpermbwd.ptr.p_int[j1]] = j1; nlogical = nlogical+1; } } sortmiddlei(&s->colpermbwd, nlogical, m-nlogical, _state); for(i=0; i<=m-1; i++) { s->tcinvidx.ptr.p_int[s->colpermbwd.ptr.p_int[i]] = i; } nstructural = m-nlogical; /* * Prepare SparseLU1 to receive factored out logical part of the matrix * and SparseLU2 to receive structural part of the matrix. */ ivectorsetlengthatleast(&s->sparselu1.ridx, nstructural+1, _state); ivectorsetlengthatleast(&s->sparselu1.didx, nstructural, _state); ivectorsetlengthatleast(&s->sparselu1.uidx, nstructural, _state); s->sparselu1.matrixtype = 1; s->sparselu1.m = nstructural; s->sparselu1.n = nlogical; s->sparselu1.ridx.ptr.p_int[0] = 0; ivectorsetlengthatleast(&s->sparselu2.ridx, nstructural+1, _state); ivectorsetlengthatleast(&s->sparselu2.didx, nstructural, _state); ivectorsetlengthatleast(&s->sparselu2.uidx, nstructural, _state); s->sparselu2.matrixtype = 1; s->sparselu2.m = nstructural; s->sparselu2.n = nstructural; s->sparselu2.ridx.ptr.p_int[0] = 0; /* * Reorder array, perform LU factorization */ for(k=0; k<=nstructural-1; k++) { /* * Make sure SparseLU1 and SparseLU2 have enough place. 
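* (a single row of AT corresponds to one column of A and therefore has at most M nonzero entries,
* so growing IDX/VALS to Offs+M below is always sufficient)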
*/ offs1 = s->sparselu1.ridx.ptr.p_int[k]; offs2 = s->sparselu2.ridx.ptr.p_int[k]; ivectorgrowto(&s->sparselu1.idx, offs1+m, _state); rvectorgrowto(&s->sparselu1.vals, offs1+m, _state); ivectorgrowto(&s->sparselu2.idx, offs2+m, _state); rvectorgrowto(&s->sparselu2.vals, offs2+m, _state); /* * Extract K-th row of the SparseLU1/2 (I-th row of the original matrix) */ i = s->rowpermbwd.ptr.p_int[k+nlogical]; j0 = at->ridx.ptr.p_int[s->idx.ptr.p_int[i]]; j1 = at->ridx.ptr.p_int[s->idx.ptr.p_int[i]+1]-1; for(j=j0; j<=j1; j++) { k1 = s->tcinvidx.ptr.p_int[at->idx.ptr.p_int[j]]; if( k1<nlogical ) { /* * Append element to SparseLU1 */ s->sparselu1.idx.ptr.p_int[offs1] = k1; s->sparselu1.vals.ptr.p_double[offs1] = at->vals.ptr.p_double[j]; offs1 = offs1+1; } else { /* * Append element to SparseLU2 */ s->sparselu2.idx.ptr.p_int[offs2] = k1-nlogical; s->sparselu2.vals.ptr.p_double[offs2] = at->vals.ptr.p_double[j]; offs2 = offs2+1; } } /* * Elements added to the last row of LU1 can be unordered, * so it needs resorting. * * LU2 does NOT need resorting because trailing NStructural * elements of permutation were post-sorted to produce * already sorted results. */ tagsortmiddleir(&s->sparselu1.idx, &s->sparselu1.vals, s->sparselu1.ridx.ptr.p_int[k], offs1-s->sparselu1.ridx.ptr.p_int[k], _state); s->sparselu1.ridx.ptr.p_int[k+1] = offs1; s->sparselu2.ridx.ptr.p_int[k+1] = offs2; } s->sparselu1.ninitialized = s->sparselu1.ridx.ptr.p_int[nstructural]; s->sparselu2.ninitialized = s->sparselu2.ridx.ptr.p_int[nstructural]; sparseinitduidx(&s->sparselu1, _state); sparseinitduidx(&s->sparselu2, _state); if( nstructural>0 ) { sptrflu(&s->sparselu2, 2, &s->densep2, &s->densep2c, &s->lubuf2, _state); for(i=0; i<=nstructural-1; i++) { j = s->rowpermbwd.ptr.p_int[i+nlogical]; s->rowpermbwd.ptr.p_int[i+nlogical] = s->rowpermbwd.ptr.p_int[s->densep2.ptr.p_int[i]+nlogical]; s->rowpermbwd.ptr.p_int[s->densep2.ptr.p_int[i]+nlogical] = j; j = s->colpermbwd.ptr.p_int[i+nlogical]; s->colpermbwd.ptr.p_int[i+nlogical] = s->colpermbwd.ptr.p_int[s->densep2c.ptr.p_int[i]+nlogical]; s->colpermbwd.ptr.p_int[s->densep2c.ptr.p_int[i]+nlogical] = j; } /* * Process L factor: * * 1. count number of non-zeros in the L factor, * 2. fill NLogical*NLogical leading block * 3.
NStructural*M bottom block */ nzl = nlogical; for(i=0; i<=nstructural-1; i++) { k = s->lubuf2.rowpermrawidx.ptr.p_int[i]; nzl = nzl+(s->sparselu1.ridx.ptr.p_int[k+1]-s->sparselu1.ridx.ptr.p_int[k]); nzl = nzl+1+(s->sparselu2.didx.ptr.p_int[i]-s->sparselu2.ridx.ptr.p_int[i]); } rvectorsetlengthatleast(&s->sparsel.vals, nzl, _state); ivectorsetlengthatleast(&s->sparsel.idx, nzl, _state); ivectorsetlengthatleast(&s->sparsel.ridx, m+1, _state); ivectorsetlengthatleast(&s->sparsel.didx, m, _state); ivectorsetlengthatleast(&s->sparsel.uidx, m, _state); s->sparsel.matrixtype = 1; s->sparsel.m = m; s->sparsel.n = m; s->sparsel.ninitialized = nzl; s->sparsel.ridx.ptr.p_int[0] = 0; for(i=0; i<=nlogical-1; i++) { s->sparsel.idx.ptr.p_int[i] = i; s->sparsel.vals.ptr.p_double[i] = 1.0; s->sparsel.ridx.ptr.p_int[i+1] = i+1; } for(i=0; i<=nstructural-1; i++) { offs = s->sparsel.ridx.ptr.p_int[nlogical+i]; k = s->lubuf2.rowpermrawidx.ptr.p_int[i]; j0 = s->sparselu1.ridx.ptr.p_int[k]; j1 = s->sparselu1.ridx.ptr.p_int[k+1]-1; for(j=j0; j<=j1; j++) { s->sparsel.idx.ptr.p_int[offs] = s->sparselu1.idx.ptr.p_int[j]; s->sparsel.vals.ptr.p_double[offs] = -s->sparselu1.vals.ptr.p_double[j]; offs = offs+1; } j0 = s->sparselu2.ridx.ptr.p_int[i]; j1 = s->sparselu2.didx.ptr.p_int[i]-1; for(j=j0; j<=j1; j++) { s->sparsel.idx.ptr.p_int[offs] = nlogical+s->sparselu2.idx.ptr.p_int[j]; s->sparsel.vals.ptr.p_double[offs] = s->sparselu2.vals.ptr.p_double[j]; offs = offs+1; } s->sparsel.idx.ptr.p_int[offs] = nlogical+i; s->sparsel.vals.ptr.p_double[offs] = 1.0; offs = offs+1; s->sparsel.ridx.ptr.p_int[nlogical+i+1] = offs; } ae_assert(s->sparsel.ninitialized==s->sparsel.ridx.ptr.p_int[m], "BasisFreshTrf: integrity check failed", _state); sparseinitduidx(&s->sparsel, _state); /* * Process U factor: * * 1. count number of non-zeros in the U factor, * 2. fill NLogical*NLogical leading block * 3. 
NStructural*NStructural bottom block */ nzu = nlogical; for(i=0; i<=nstructural-1; i++) { nzu = nzu+1+(s->sparselu2.ridx.ptr.p_int[i+1]-s->sparselu2.uidx.ptr.p_int[i]); } rvectorsetlengthatleast(&s->sparseu.vals, nzu, _state); ivectorsetlengthatleast(&s->sparseu.idx, nzu, _state); ivectorsetlengthatleast(&s->sparseu.ridx, m+1, _state); ivectorsetlengthatleast(&s->sparseu.didx, m, _state); ivectorsetlengthatleast(&s->sparseu.uidx, m, _state); s->sparseu.matrixtype = 1; s->sparseu.m = m; s->sparseu.n = m; s->sparseu.ninitialized = nzu; s->sparseu.ridx.ptr.p_int[0] = 0; for(i=0; i<=nlogical-1; i++) { s->sparseu.idx.ptr.p_int[i] = i; s->sparseu.vals.ptr.p_double[i] = -1.0; s->sparseu.ridx.ptr.p_int[i+1] = i+1; } for(i=0; i<=nstructural-1; i++) { offs = s->sparseu.ridx.ptr.p_int[nlogical+i]; s->sparseu.idx.ptr.p_int[offs] = nlogical+i; j = s->sparselu2.didx.ptr.p_int[i]; if( j<s->sparselu2.uidx.ptr.p_int[i] ) { ae_assert(s->sparselu2.idx.ptr.p_int[j]==i, "BasisFreshTrf: integrity check failed", _state); s->sparseu.vals.ptr.p_double[offs] = s->sparselu2.vals.ptr.p_double[j]; } else { s->sparseu.vals.ptr.p_double[offs] = (double)(0); } offs = offs+1; j0 = s->sparselu2.uidx.ptr.p_int[i]; j1 = s->sparselu2.ridx.ptr.p_int[i+1]-1; for(j=j0; j<=j1; j++) { s->sparseu.idx.ptr.p_int[offs] = nlogical+s->sparselu2.idx.ptr.p_int[j]; s->sparseu.vals.ptr.p_double[offs] = s->sparselu2.vals.ptr.p_double[j]; offs = offs+1; } s->sparseu.ridx.ptr.p_int[nlogical+i+1] = offs; } ae_assert(s->sparseu.ninitialized==s->sparseu.ridx.ptr.p_int[m], "BasisFreshTrf: integrity check failed", _state); sparseinitduidx(&s->sparseu, _state); } else { ivectorsetlengthatleast(&s->nrs, m, _state); for(i=0; i<=m-1; i++) { s->nrs.ptr.p_int[i] = 1; } sparsecreatecrsbuf(m, m, &s->nrs, &s->sparsel, _state); for(i=0; i<=nlogical-1; i++) { sparseset(&s->sparsel, i, i, 1.0, _state); } sparsecreatecrsbuf(m, m, &s->nrs, &s->sparseu, _state); for(i=0; i<=nlogical-1; i++) { sparseset(&s->sparseu, i, i, -1.0, _state); } } sparsecopytransposecrsbuf(&s->sparseu, &s->sparseut, _state); s->isvalidtrf = ae_true; s->trfage = 0; result = reviseddualsimplex_basisminimumdiagonalelement(s, _state); return result; } /* * */ ae_assert(ae_false, "BasisFreshTrf: unexpected TRF type", _state); return result; } /************************************************************************* This function fills S.DSEWeights by actual weights according to current settings and sets validity flag. Basis object MUST store valid triangular factorization, otherwise this function throws an exception.
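For reference, a sketch of what the loop below computes (this restates the code rather than adding a separate contract): with dual steepest edge pricing (Settings.Pricing=1 or -1) the weight of the I-th basic variable is the squared norm of the I-th row of inv(B), obtained via the transposed solver:

    solve (B^T)*rho = e[i]      (e[i] - I-th unit vector, see BasisSolveT below)
    DSEWeights[i]   = rho'*rho

Logical (slack) variables are assigned unit weight, and Settings.Pricing=0 assigns unit weight to every basic position.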
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basisrequestweights(dualsimplexbasis* s, dualsimplexsettings* settings, ae_state *_state) { ae_int_t m; ae_int_t ns; ae_int_t i; ae_int_t j; double v; double vv; m = s->m; ns = s->ns; ae_assert((settings->pricing==-1||settings->pricing==0)||settings->pricing==1, "BasisRequestWeights: unknown pricing type", _state); ae_assert(s->isvalidtrf, "BasisRequestWeights: factorization is not computed prior to calling this function", _state); /* * If weights are valid, return immediately */ if( s->dsevalid ) { return; } /* * Compute weights from scratch */ if( settings->pricing==-1||settings->pricing==1 ) { for(i=0; i<=m-1; i++) { if( s->idx.ptr.p_int[i]<ns ) { /* * Structural variable */ rvectorsetlengthatleast(&s->wtmp0, m, _state); rvectorsetlengthatleast(&s->wtmp1, m, _state); for(j=0; j<=m-1; j++) { s->wtmp0.ptr.p_double[j] = (double)(0); } s->wtmp0.ptr.p_double[i] = (double)(1); reviseddualsimplex_basissolvet(s, &s->wtmp0, &s->wtmp1, &s->wtmp2, _state); v = (double)(0); for(j=0; j<=m-1; j++) { vv = s->wtmp1.ptr.p_double[j]; v = v+vv*vv; } s->dseweights.ptr.p_double[i] = v; } else { /* * Logical variable, weight can be set to 1.0 */ s->dseweights.ptr.p_double[i] = 1.0; } } s->dsevalid = ae_true; return; } /* * Compute weights from scratch */ if( settings->pricing==0 ) { for(i=0; i<=m-1; i++) { s->dseweights.ptr.p_double[i] = 1.0; } s->dsevalid = ae_true; return; } ae_assert(ae_false, "BasisRequestWeights: unexpected pricing type", _state); } /************************************************************************* This function updates triangular factorization by adding Q to basis and removing P from basis. It also updates index tables IsBasic[], BasicIdx[], Basis.NIdx[]. AlphaQim contains intermediate result from Ftran for AlphaQ, it is used by Forest-Tomlin update scheme. If other update is used, it is not referenced at all. X[], D[], Z are NOT recomputed. Tau is used if Settings.Pricing=1, ignored otherwise. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basisupdatetrf(dualsimplexbasis* s, sparsematrix* at, ae_int_t p, ae_int_t q, /* Real */ ae_vector* alphaq, /* Real */ ae_vector* alphaqim, ae_int_t r, /* Real */ ae_vector* tau, dualsimplexsettings* settings, ae_state *_state) { ae_int_t m; ae_int_t nn; ae_int_t i; ae_int_t j; ae_bool processed; double invaq; ae_int_t dstoffs; ae_int_t srcoffs; ae_int_t srcidx; double srcval; double vcorner; ae_int_t idxd; double v; m = s->m; nn = s->ns; /* * Update index tables * * TODO: better code!!!!!!!!!!!!!!!!!!!!!!!
*/ s->isbasic.ptr.p_bool[p] = ae_false; s->isbasic.ptr.p_bool[q] = ae_true; for(i=0; i<=m-1; i++) { if( s->idx.ptr.p_int[i]==p ) { s->idx.ptr.p_int[i] = q; break; } } for(i=0; i<=nn-1; i++) { if( s->nidx.ptr.p_int[i]==q ) { s->nidx.ptr.p_int[i] = p; break; } } /* * Update dense factorization */ if( ((s->trftype!=settings->trftype||s->trftype==0)||!s->isvalidtrf)||s->trfage>=settings->maxtrfage ) { /* * Complete refresh is needed for factorization */ s->isvalidtrf = ae_false; reviseddualsimplex_basisfreshtrf(s, at, settings, _state); } else { processed = ae_false; if( (s->trftype==0||s->trftype==1)||s->trftype==2 ) { /* * Dense/sparse factorizations with dense PFI */ ae_assert(ae_fp_neq(alphaq->ptr.p_double[r],(double)(0)), "BasisUpdateTrf: integrity check failed, AlphaQ[R]=0", _state); rvectorgrowto(&s->densepfieta, (s->trfage+1)*m, _state); ivectorgrowto(&s->rk, s->trfage+1, _state); s->rk.ptr.p_int[s->trfage] = r; invaq = 1.0/alphaq->ptr.p_double[r]; for(i=0; i<=m-1; i++) { if( i!=r ) { s->densepfieta.ptr.p_double[s->trfage*m+i] = -alphaq->ptr.p_double[i]*invaq; } else { s->densepfieta.ptr.p_double[s->trfage*m+i] = invaq; } } inc(&s->trfage, _state); processed = ae_true; } if( s->trftype==3 ) { /* * Sparse factorization with Forest-Tomlin update */ ae_assert(ae_fp_neq(alphaq->ptr.p_double[r],(double)(0)), "BasisUpdateTrf: integrity check failed, AlphaQ[R]=0", _state); rvectorgrowto(&s->densemu, (s->trfage+1)*m, _state); ivectorgrowto(&s->rk, s->trfage+1, _state); ivectorgrowto(&s->dk, s->trfage+1, _state); rvectorsetlengthatleast(&s->utmp0, m, _state); /* * Determine D - index of row being overwritten by Forest-Tomlin update */ idxd = -1; for(i=0; i<=m-1; i++) { if( s->rowpermbwd.ptr.p_int[i]==r ) { idxd = i; break; } } ae_assert(idxd>=0, "BasisUpdateTrf: unexpected integrity check failure", _state); s->rk.ptr.p_int[s->trfage] = r; s->dk.ptr.p_int[s->trfage] = idxd; /* * Modify L with permutation which moves D-th row/column to the end: * * rows 0...D-1 are left intact * * rows D+1...M-1 are moved one position up, with columns 0..D-1 * retained as is, and columns D+1...M-1 being moved one position left. * * last row is filled by permutation/modification of AlphaQim * Determine FT update coefficients in the process. */ ivectorgrowto(&s->sparsel.idx, s->sparsel.ridx.ptr.p_int[m]+m, _state); rvectorgrowto(&s->sparsel.vals, s->sparsel.ridx.ptr.p_int[m]+m, _state); for(i=0; i<=m-1; i++) { s->utmp0.ptr.p_double[i] = (double)(0); } for(i=idxd+1; i<=m-1; i++) { j = s->sparsel.ridx.ptr.p_int[i+1]-1; if( s->sparsel.idx.ptr.p_int[j]!=i||s->sparsel.vals.ptr.p_double[j]!=1 ) { ae_assert(ae_false, "UpdateTrf: integrity check failed for sparse L", _state); } dstoffs = s->sparsel.ridx.ptr.p_int[i-1]; srcoffs = s->sparsel.ridx.ptr.p_int[i]; /* * Read first element in the row (it has at least one - unit diagonal) */ srcidx = s->sparsel.idx.ptr.p_int[srcoffs]; srcval = s->sparsel.vals.ptr.p_double[srcoffs]; /* * Read/write columns 0...D-1 */ while(srcidx<idxd) { s->sparsel.idx.ptr.p_int[dstoffs] = srcidx; s->sparsel.vals.ptr.p_double[dstoffs] = srcval; dstoffs = dstoffs+1; srcoffs = srcoffs+1; srcidx = s->sparsel.idx.ptr.p_int[srcoffs]; srcval = s->sparsel.vals.ptr.p_double[srcoffs]; } /* * If we have non-zero element in column D, use it as * right-hand side of intermediate linear system which * is used to determine coefficients of update matrix.
*/ if( srcidx==idxd ) { s->utmp0.ptr.p_double[i-1] = srcval; srcoffs = srcoffs+1; srcidx = s->sparsel.idx.ptr.p_int[srcoffs]; srcval = s->sparsel.vals.ptr.p_double[srcoffs]; } /* * Process columns D+1...I-1 */ v = s->utmp0.ptr.p_double[i-1]; while(srcidx<i) { s->sparsel.idx.ptr.p_int[dstoffs] = srcidx-1; s->sparsel.vals.ptr.p_double[dstoffs] = srcval; v = v-srcval*s->utmp0.ptr.p_double[srcidx-1]; dstoffs = dstoffs+1; srcoffs = srcoffs+1; srcidx = s->sparsel.idx.ptr.p_int[srcoffs]; srcval = s->sparsel.vals.ptr.p_double[srcoffs]; } s->utmp0.ptr.p_double[i-1] = v; /* * Write out unit diagonal, finalize row */ s->sparsel.idx.ptr.p_int[dstoffs] = i-1; s->sparsel.vals.ptr.p_double[dstoffs] = (double)(1); dstoffs = dstoffs+1; s->sparsel.ridx.ptr.p_int[i] = dstoffs; } s->utmp0.ptr.p_double[m-1] = (double)(1); dstoffs = s->sparsel.ridx.ptr.p_int[m-1]; for(j=0; j<=idxd-1; j++) { v = alphaqim->ptr.p_double[j]; if( v!=0 ) { s->sparsel.idx.ptr.p_int[dstoffs] = j; s->sparsel.vals.ptr.p_double[dstoffs] = v; dstoffs = dstoffs+1; } } vcorner = alphaqim->ptr.p_double[idxd]; for(j=idxd+1; j<=m-1; j++) { v = alphaqim->ptr.p_double[j]; if( v!=0 ) { s->sparsel.idx.ptr.p_int[dstoffs] = j-1; s->sparsel.vals.ptr.p_double[dstoffs] = v; dstoffs = dstoffs+1; vcorner = vcorner-v*s->utmp0.ptr.p_double[j-1]; } } s->sparsel.idx.ptr.p_int[dstoffs] = m-1; s->sparsel.vals.ptr.p_double[dstoffs] = (double)(1); dstoffs = dstoffs+1; s->sparsel.ridx.ptr.p_int[m] = dstoffs; s->sparsel.ninitialized = s->sparsel.ridx.ptr.p_int[m]; for(i=0; i<=m-1; i++) { j = s->sparsel.ridx.ptr.p_int[i+1]; s->sparsel.didx.ptr.p_int[i] = j-1; s->sparsel.uidx.ptr.p_int[i] = j; } ae_assert(vcorner!=0, "UpdateTrf: corner element is zero, degeneracy detected", _state); v = 1/vcorner; for(i=0; i<=m-2; i++) { s->densemu.ptr.p_double[s->trfage*m+i] = -s->utmp0.ptr.p_double[i]*v; } s->densemu.ptr.p_double[s->trfage*m+m-1] = v; /* * Multiply row permutation matrix by cyclic permutation applied to L */ reviseddualsimplex_inversecyclicpermutation(&s->rowpermbwd, m, idxd, &s->utmpi, _state); /* * Done */ inc(&s->trfage, _state); processed = ae_true; } ae_assert(processed, "BasisUpdateTrf: unexpected TRF type", _state); } /* * Update pricing weights */ ae_assert((settings->pricing==-1||settings->pricing==0)||settings->pricing==1, "BasisUpdateTrf: unexpected Settings.Pricing", _state); processed = ae_false; if( settings->pricing==-1 ) { /* * Weights are recomputed from scratch at every step. * VERY, VERY time consuming, used only for debug purposes. */ s->dsevalid = ae_false; reviseddualsimplex_basisrequestweights(s, settings, _state); processed = ae_true; } if( settings->pricing==0 ) { /* * Weights are filled by 1.0 */ if( !s->dsevalid ) { for(i=0; i<=m-1; i++) { s->dseweights.ptr.p_double[i] = 1.0; } s->dsevalid = ae_true; } processed = ae_true; } if( settings->pricing==1 ) { /* * Weights are computed using DSE update formula.
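* As implemented below (a restatement of the code, with R = leaving row, AlphaQ = entering
* column, Tau = auxiliary vector supplied by the caller, Beta[] = DSEWeights[]):
*   for i<>r:  Beta[i] := max( Beta[i] - 2*(AlphaQ[i]/AlphaQ[r])*Tau[i] + Beta[r]*(AlphaQ[i]/AlphaQ[r])^2, MinBeta )
*   for i=r:   Beta[r] := Beta[r]/AlphaQ[r]^2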
*/ if( s->dsevalid ) { /* * Compute using update formula */ for(i=0; i<=m-1; i++) { if( i!=r ) { s->dseweights.ptr.p_double[i] = s->dseweights.ptr.p_double[i]-2*(alphaq->ptr.p_double[i]/alphaq->ptr.p_double[r])*tau->ptr.p_double[i]+s->dseweights.ptr.p_double[r]*ae_sqr(alphaq->ptr.p_double[i]/alphaq->ptr.p_double[r], _state); s->dseweights.ptr.p_double[i] = ae_maxreal(s->dseweights.ptr.p_double[i], reviseddualsimplex_minbeta, _state); } } s->dseweights.ptr.p_double[r] = s->dseweights.ptr.p_double[r]/(alphaq->ptr.p_double[r]*alphaq->ptr.p_double[r]); } else { /* * No prior values, compute from scratch (usually it is done only once) */ reviseddualsimplex_basisrequestweights(s, settings, _state); } processed = ae_true; } ae_assert(processed, "BasisUpdateTrf: unexpected pricing type", _state); } /************************************************************************* This function computes solution to B*x=r. Output array is reallocated if needed. Temporary array TmpX[] is used and reallocated if necessary. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basissolve(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ ae_vector* x, /* Real */ ae_vector* tmpx, ae_state *_state) { reviseddualsimplex_basissolvex(s, r, x, x, ae_false, tmpx, _state); } /************************************************************************* This function computes solution to B*x=r. It also additionally outputs intermediate result of multiplication by inv(DS)*inv(U)*inv(colPerm), a value essential for Forest-Tomlin update. Output arrays are reallocated if needed. Temporary array TX[] can be used/reallocated. If NeedIntermediate is False or Forest-Tomlin updates are not used, then Xim[] is not referenced at all. 
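In outline (a sketch of the code below rather than a formal contract): the right-hand side is permuted with ColPermBwd, a transposed triangular solve with U is performed, then - for Forest-Tomlin factorizations - the accumulated update terms are applied (this is the point at which Xim[] is captured), followed by a transposed solve with L and a permutation with RowPermBwd; for the dense/PFI factorizations the stored eta vectors are applied at the very end instead.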
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basissolvex(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ ae_vector* x, /* Real */ ae_vector* xim, ae_bool needintermediate, /* Real */ ae_vector* tx, ae_state *_state) { ae_int_t m; ae_int_t i; ae_int_t d; ae_int_t k; double v; double vd; double vv; ae_bool processed; ae_assert(s->isvalidtrf, "BasisSolve: integrity check failed", _state); m = s->m; processed = ae_false; rvectorsetlengthatleast(tx, m, _state); /* * Dense/sparse factorizations with dense PFI * * NOTE: although we solve B*x=r, internally we store factorization of B^T */ if( (s->trftype==0||s->trftype==1)||s->trftype==2 ) { ae_assert(s->trfage==0||s->trftype!=0, "BasisSolve: integrity check failed TrfAge vs TrfType", _state); rvectorsetlengthatleast(x, m, _state); for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = r->ptr.p_double[s->colpermbwd.ptr.p_int[i]]; } if( s->trftype==0||s->trftype==1 ) { /* * Dense TRF */ rmatrixtrsv(m, &s->denselu, 0, 0, ae_true, ae_false, 1, x, 0, _state); rmatrixtrsv(m, &s->denselu, 0, 0, ae_false, ae_true, 1, x, 0, _state); } else { /* * Sparse TRF */ sparsetrsv(&s->sparseu, ae_true, ae_false, 1, x, _state); sparsetrsv(&s->sparsel, ae_false, ae_false, 1, x, _state); } for(i=0; i<=m-1; i++) { tx->ptr.p_double[s->rowpermbwd.ptr.p_int[i]] = x->ptr.p_double[i]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } for(k=0; k<=s->trfage-1; k++) { v = x->ptr.p_double[s->rk.ptr.p_int[k]]; for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]+s->densepfieta.ptr.p_double[k*m+i]*v; } x->ptr.p_double[s->rk.ptr.p_int[k]] = x->ptr.p_double[s->rk.ptr.p_int[k]]-v; } processed = ae_true; } /* * Sparse factorization with Forest-Tomlin update * * NOTE: although we solve B*x=r, internally we store factorization of B^T */ if( s->trftype==3 ) { rvectorsetlengthatleast(x, m, _state); for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = r->ptr.p_double[s->colpermbwd.ptr.p_int[i]]; } sparsetrsv(&s->sparseu, ae_true, ae_false, 1, x, _state); for(k=0; k<=s->trfage-1; k++) { /* * The code below is an amalgamation of two parts: * * cyclic permutation * V:=X[D]; * for I:=D to M-2 do * X[I]:=X[I+1]; * X[M-1]:=V; * * and triangular factor * V:=0; * for I:=D to M-1 do * V:=V+X[I]*S.DenseMu[K*M+I]; * X[M-1]:=V; */ d = s->dk.ptr.p_int[k]; vv = (double)(0); vd = x->ptr.p_double[d]; for(i=d; i<=m-2; i++) { v = x->ptr.p_double[i+1]; x->ptr.p_double[i] = v; vv = vv+v*s->densemu.ptr.p_double[k*m+i]; } x->ptr.p_double[m-1] = vv+vd*s->densemu.ptr.p_double[k*m+m-1]; } if( needintermediate ) { rvectorsetlengthatleast(xim, m, _state); for(i=0; i<=m-1; i++) { xim->ptr.p_double[i] = x->ptr.p_double[i]; } } sparsetrsv(&s->sparsel, ae_false, ae_false, 1, x, _state); for(i=0; i<=m-1; i++) { tx->ptr.p_double[s->rowpermbwd.ptr.p_int[i]] = x->ptr.p_double[i]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } processed = ae_true; } /* * Integrity check */ ae_assert(processed, "BasisSolve: unsupported TRF type", _state); v = (double)(0); for(i=0; i<=m-1; i++) { v = v+x->ptr.p_double[i]; } ae_assert(ae_isfinite(v, _state), "BasisSolve: integrity check failed (degeneracy in B?)", _state); } /************************************************************************* This function computes solution to (B^T)*x=r. Output array is reallocated if needed. 
TX[] temporary is reallocated if needed -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_basissolvet(dualsimplexbasis* s, /* Real */ ae_vector* r, /* Real */ ae_vector* x, /* Real */ ae_vector* tx, ae_state *_state) { ae_int_t m; ae_int_t i; ae_int_t d; ae_int_t k; double v; double vm; ae_bool processed; ae_assert(s->isvalidtrf, "BasisSolveT: integrity check failed", _state); m = s->m; processed = ae_false; rvectorsetlengthatleast(tx, m, _state); /* * Dense factorizations */ if( (s->trftype==0||s->trftype==1)||s->trftype==2 ) { ae_assert(s->trfage==0||s->trftype!=0, "BasisSolveT: integrity check failed TrfAge vs TrfType", _state); rvectorsetlengthatleast(x, m, _state); for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = r->ptr.p_double[i]; } for(k=s->trfage-1; k>=0; k--) { v = (double)(0); for(i=0; i<=m-1; i++) { v = v+s->densepfieta.ptr.p_double[k*m+i]*x->ptr.p_double[i]; } x->ptr.p_double[s->rk.ptr.p_int[k]] = v; } for(i=0; i<=m-1; i++) { tx->ptr.p_double[i] = x->ptr.p_double[s->rowpermbwd.ptr.p_int[i]]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } if( s->trftype==0||s->trftype==1 ) { /* * Dense TRF */ rmatrixtrsv(m, &s->denselu, 0, 0, ae_false, ae_true, 0, x, 0, _state); rmatrixtrsv(m, &s->denselu, 0, 0, ae_true, ae_false, 0, x, 0, _state); } else { /* * Sparse TRF */ sparsetrsv(&s->sparsel, ae_false, ae_false, 0, x, _state); sparsetrsv(&s->sparseu, ae_true, ae_false, 0, x, _state); } for(i=0; i<=m-1; i++) { tx->ptr.p_double[s->colpermbwd.ptr.p_int[i]] = x->ptr.p_double[i]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } processed = ae_true; } /* * Sparse factorization with Forest-Tomlin update */ if( s->trftype==3 ) { rvectorsetlengthatleast(x, m, _state); for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = r->ptr.p_double[i]; } for(i=0; i<=m-1; i++) { tx->ptr.p_double[i] = x->ptr.p_double[s->rowpermbwd.ptr.p_int[i]]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } sparsetrsv(&s->sparsel, ae_false, ae_false, 0, x, _state); for(k=s->trfage-1; k>=0; k--) { /* * The code below is an amalgamation of two parts: * * triangular factor * V:=X[M-1]; * for I:=D to M-2 do * X[I]:=X[I]+S.DenseMu[K*M+I]*V; * X[M-1]:=S.DenseMu[K*M+(M-1)]*V; * * inverse of cyclic permutation * V:=X[M-1]; * for I:=M-1 downto D+1 do * X[I]:=X[I-1]; * X[D]:=V; */ d = s->dk.ptr.p_int[k]; vm = x->ptr.p_double[m-1]; v = s->densemu.ptr.p_double[k*m+(m-1)]*vm; if( vm!=0 ) { /* * X[M-1] is non-zero, apply update */ for(i=m-2; i>=d; i--) { x->ptr.p_double[i+1] = x->ptr.p_double[i]+s->densemu.ptr.p_double[k*m+i]*vm; } } else { /* * X[M-1] is zero, just cyclic permutation */ for(i=m-2; i>=d; i--) { x->ptr.p_double[i+1] = x->ptr.p_double[i]; } } x->ptr.p_double[d] = v; } sparsetrsv(&s->sparseut, ae_false, ae_false, 1, x, _state); for(i=0; i<=m-1; i++) { tx->ptr.p_double[s->colpermbwd.ptr.p_int[i]] = x->ptr.p_double[i]; } for(i=0; i<=m-1; i++) { x->ptr.p_double[i] = tx->ptr.p_double[i]; } processed = ae_true; } /* * Integrity check */ ae_assert(processed, "BasisSolveT: unsupported TRF type", _state); v = (double)(0); for(i=0; i<=m-1; i++) { v = v+x->ptr.p_double[i]; } ae_assert(ae_isfinite(v, _state), "BasisSolveT: integrity check failed (degeneracy in B?)", _state); } /************************************************************************* This function computes product AN*XN, where AN is a non-basic subset of columns of A, and XN is a non-basic subset of columns 
of X. Output array is reallocated if its size is too small. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_computeanxn(dualsimplexstate* state, dualsimplexsubproblem* subproblem, /* Real */ ae_vector* x, /* Real */ ae_vector* y, ae_state *_state) { ae_int_t nn; ae_int_t nx; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t j0; ae_int_t j1; double v; nx = subproblem->ns+subproblem->m; m = subproblem->m; nn = nx-m; /* * Integrity check */ ae_assert(subproblem->state>=reviseddualsimplex_ssvalidxn, "ComputeANXN: XN is invalid", _state); /* * Compute */ rvectorsetlengthatleast(y, m, _state); for(i=0; i<=m-1; i++) { y->ptr.p_double[i] = (double)(0); } for(i=0; i<=nn-1; i++) { j0 = state->at.ridx.ptr.p_int[state->basis.nidx.ptr.p_int[i]]; j1 = state->at.ridx.ptr.p_int[state->basis.nidx.ptr.p_int[i]+1]-1; v = x->ptr.p_double[state->basis.nidx.ptr.p_int[i]]; for(j=j0; j<=j1; j++) { k = state->at.idx.ptr.p_int[j]; y->ptr.p_double[k] = y->ptr.p_double[k]+v*state->at.vals.ptr.p_double[j]; } } } /************************************************************************* This function computes product (AN^T)*y, where AN is a non-basic subset of columns of A, and y is some vector. Output array is set to full NX-sized length, with basic components of the output being set to zeros. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_computeantv(dualsimplexstate* state, /* Real */ ae_vector* y, /* Real */ ae_vector* r, ae_state *_state) { ae_int_t nn; ae_int_t nx; ae_int_t m; ae_int_t i; ae_int_t j; ae_int_t j0; ae_int_t j1; double v; nx = state->ns+state->m; m = state->m; nn = nx-m; /* * Allocate output, set to zero */ rvectorsetlengthatleast(r, nx, _state); for(i=0; i<=nx-1; i++) { r->ptr.p_double[i] = (double)(0); } for(i=0; i<=nn-1; i++) { j0 = state->at.ridx.ptr.p_int[state->basis.nidx.ptr.p_int[i]]; j1 = state->at.ridx.ptr.p_int[state->basis.nidx.ptr.p_int[i]+1]-1; v = (double)(0); for(j=j0; j<=j1; j++) { v = v+state->at.vals.ptr.p_double[j]*y->ptr.p_double[state->at.idx.ptr.p_int[j]]; } r->ptr.p_double[state->basis.nidx.ptr.p_int[i]] = v; } } /************************************************************************* Returns True if I-th lower bound is present -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_hasbndl(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state) { ae_int_t k; ae_bool result; k = subproblem->bndt.ptr.p_int[i]; result = ae_false; if( (k==0||k==1)||k==3 ) { result = ae_true; return result; } if( k==2||k==4 ) { result = ae_false; return result; } ae_assert(ae_false, "HasBndL: integrity check failed", _state); return result; } /************************************************************************* Returns True if I-th upper bound is present -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_hasbndu(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state) { ae_int_t k; ae_bool result; k = subproblem->bndt.ptr.p_int[i]; result = ae_false; if( (k==0||k==2)||k==3 ) { result = ae_true; return result; } if( k==1||k==4 ) { result = ae_false; return result; } ae_assert(ae_false, "HasBndL: integrity check failed", _state); 
return result; } /************************************************************************* Returns True if I-th variable if free -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_isfree(dualsimplexsubproblem* subproblem, ae_int_t i, ae_state *_state) { ae_int_t k; ae_bool result; k = subproblem->bndt.ptr.p_int[i]; result = ae_false; if( ((k==0||k==1)||k==2)||k==3 ) { result = ae_false; return result; } if( k==4 ) { result = ae_true; return result; } ae_assert(ae_false, "IsFree: integrity check failed", _state); return result; } /************************************************************************* Downgrades problem state to the specified one (if status is lower than one specified by user, nothing is changed) -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_downgradestate(dualsimplexsubproblem* subproblem, ae_int_t s, ae_state *_state) { subproblem->state = ae_minint(subproblem->state, s, _state); } /************************************************************************* Returns maximum dual infeasibility (only non-basic variables are checked, we assume that basic variables are good enough). -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static double reviseddualsimplex_dualfeasibilityerror(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t nn; ae_int_t bndt; double result; nn = s->ns; ae_assert(s->state==reviseddualsimplex_ssvalid, "DualFeasibilityError: invalid X", _state); result = (double)(0); for(i=0; i<=nn-1; i++) { j = state->basis.nidx.ptr.p_int[i]; bndt = s->bndt.ptr.p_int[j]; if( bndt==reviseddualsimplex_ccfixed ) { continue; } if( bndt==reviseddualsimplex_ccrange ) { if( s->xa.ptr.p_double[j]==s->bndl.ptr.p_double[j] ) { result = ae_maxreal(result, -s->d.ptr.p_double[j], _state); continue; } if( s->xa.ptr.p_double[j]==s->bndu.ptr.p_double[j] ) { result = ae_maxreal(result, s->d.ptr.p_double[j], _state); continue; } ae_assert(ae_false, "DualFeasibilityError: integrity check failed", _state); } if( bndt==reviseddualsimplex_cclower ) { ae_assert(s->xa.ptr.p_double[j]==s->bndl.ptr.p_double[j], "DualFeasibilityError: integrity check failed", _state); result = ae_maxreal(result, -s->d.ptr.p_double[j], _state); continue; } if( bndt==reviseddualsimplex_ccupper ) { ae_assert(s->xa.ptr.p_double[j]==s->bndu.ptr.p_double[j], "DualFeasibilityError: integrity check failed", _state); result = ae_maxreal(result, s->d.ptr.p_double[j], _state); continue; } if( bndt==reviseddualsimplex_ccfree ) { result = ae_maxreal(result, ae_fabs(s->d.ptr.p_double[j], _state), _state); continue; } ae_assert(ae_false, "DSSOptimize: integrity check failed (infeasible constraint)", _state); } return result; } /************************************************************************* Returns True for dual feasible basis (some minor dual feasibility error is allowed), False otherwise -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool reviseddualsimplex_isdualfeasible(dualsimplexstate* state, dualsimplexsubproblem* s, ae_state *_state) { ae_bool result; result = ae_fp_less_eq(reviseddualsimplex_dualfeasibilityerror(state, s, _state),reviseddualsimplex_dtol); return 
result; } /************************************************************************* Transforms sequence of pivot permutations P0*P1*...*Pm to forward/backward permutation representation. -- ALGLIB -- Copyright 12.09.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_pivottobwd(/* Integer */ ae_vector* p, ae_int_t m, /* Integer */ ae_vector* bwd, ae_state *_state) { ae_int_t i; ae_int_t k; ae_int_t t; ivectorsetlengthatleast(bwd, m, _state); for(i=0; i<=m-1; i++) { bwd->ptr.p_int[i] = i; } for(i=0; i<=m-1; i++) { k = p->ptr.p_int[i]; if( k!=i ) { t = bwd->ptr.p_int[i]; bwd->ptr.p_int[i] = bwd->ptr.p_int[k]; bwd->ptr.p_int[k] = t; } } } /************************************************************************* Applies inverse cyclic permutation of [D,M-1) (element D is moved to the end, the rest of elements is shifted one position backward) to the already existing permutation. -- ALGLIB -- Copyright 12.09.2018 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_inversecyclicpermutation(/* Integer */ ae_vector* bwd, ae_int_t m, ae_int_t d, /* Integer */ ae_vector* tmpi, ae_state *_state) { ae_int_t i; ae_int_t k; /* * update Bwd[] */ k = bwd->ptr.p_int[d]; for(i=d; i<=m-2; i++) { bwd->ptr.p_int[i] = bwd->ptr.p_int[i+1]; } bwd->ptr.p_int[m-1] = k; } /************************************************************************* Offloads basic components of X[], BndT[], BndL[], BndU[] to XB/BndTB/BndLB/BndUB. -- ALGLIB -- Copyright 24.01.2019 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_offloadbasiccomponents(dualsimplexsubproblem* s, dualsimplexbasis* basis, ae_state *_state) { ae_int_t i; ae_int_t m; m = basis->m; for(i=0; i<=m-1; i++) { s->xb.ptr.p_double[i] = s->xa.ptr.p_double[basis->idx.ptr.p_int[i]]; s->bndlb.ptr.p_double[i] = s->bndl.ptr.p_double[basis->idx.ptr.p_int[i]]; s->bndub.ptr.p_double[i] = s->bndu.ptr.p_double[basis->idx.ptr.p_int[i]]; s->bndtb.ptr.p_int[i] = s->bndt.ptr.p_int[basis->idx.ptr.p_int[i]]; } } /************************************************************************* Recombines basic and non-basic components in X[] -- ALGLIB -- Copyright 24.01.2019 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_recombinebasicnonbasicx(dualsimplexsubproblem* s, dualsimplexbasis* basis, ae_state *_state) { ae_int_t m; ae_int_t i; m = basis->m; for(i=0; i<=m-1; i++) { s->xa.ptr.p_double[basis->idx.ptr.p_int[i]] = s->xb.ptr.p_double[i]; } } /************************************************************************* Restores original scale of the variables, enforces box constraints (just to be sure) -- ALGLIB -- Copyright 24.01.2019 by Bochkanov Sergey *************************************************************************/ static void reviseddualsimplex_unscaleandenforce(/* Real */ ae_vector* repx, /* Real */ ae_vector* repy, /* Real */ ae_vector* repdx, dualsimplexstate* s, ae_state *_state) { ae_int_t i; ae_int_t ns; ae_int_t m; ns = s->ns; m = s->m; for(i=0; i<=ns-1; i++) { if( s->repstats.ptr.p_int[i]<0 ) { repx->ptr.p_double[i] = s->rawbndl.ptr.p_double[i]; continue; } if( s->repstats.ptr.p_int[i]>0 ) { repx->ptr.p_double[i] = s->rawbndu.ptr.p_double[i]; continue; } repx->ptr.p_double[i] = repx->ptr.p_double[i]*s->varscales.ptr.p_double[i]; if( 
ae_isfinite(s->rawbndl.ptr.p_double[i], _state) ) { repx->ptr.p_double[i] = ae_maxreal(repx->ptr.p_double[i], s->rawbndl.ptr.p_double[i], _state); } if( ae_isfinite(s->rawbndu.ptr.p_double[i], _state) ) { repx->ptr.p_double[i] = ae_minreal(repx->ptr.p_double[i], s->rawbndu.ptr.p_double[i], _state); } } for(i=0; i<=m-1; i++) { repy->ptr.p_double[i] = repy->ptr.p_double[i]/s->rowscales.ptr.p_double[i]; repdx->ptr.p_double[i] = repdx->ptr.p_double[i]/s->rowscales.ptr.p_double[i]; } } void _dualsimplexsettings_init(void* _p, ae_state *_state, ae_bool make_automatic) { dualsimplexsettings *p = (dualsimplexsettings*)_p; ae_touch_ptr((void*)p); } void _dualsimplexsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { dualsimplexsettings *dst = (dualsimplexsettings*)_dst; dualsimplexsettings *src = (dualsimplexsettings*)_src; dst->pivottol = src->pivottol; dst->perturbmag = src->perturbmag; dst->maxtrfage = src->maxtrfage; dst->trftype = src->trftype; dst->ratiotest = src->ratiotest; dst->pricing = src->pricing; dst->shifting = src->shifting; } void _dualsimplexsettings_clear(void* _p) { dualsimplexsettings *p = (dualsimplexsettings*)_p; ae_touch_ptr((void*)p); } void _dualsimplexsettings_destroy(void* _p) { dualsimplexsettings *p = (dualsimplexsettings*)_p; ae_touch_ptr((void*)p); } void _dualsimplexbasis_init(void* _p, ae_state *_state, ae_bool make_automatic) { dualsimplexbasis *p = (dualsimplexbasis*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->idx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->nidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->isbasic, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->denselu, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsel, _state, make_automatic); _sparsematrix_init(&p->sparseu, _state, make_automatic); _sparsematrix_init(&p->sparseut, _state, make_automatic); ae_vector_init(&p->rowpermbwd, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->colpermbwd, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->densepfieta, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->densemu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rk, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->dk, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->dseweights, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->wtmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->wtmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->wtmp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nrs, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->tcinvidx, 0, DT_INT, _state, make_automatic); ae_matrix_init(&p->denselu2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->densep2, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->densep2c, 0, DT_INT, _state, make_automatic); _sparsematrix_init(&p->sparselu1, _state, make_automatic); _sparsematrix_init(&p->sparselu2, _state, make_automatic); _sluv2buffer_init(&p->lubuf2, _state, make_automatic); ae_vector_init(&p->tmpi, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->utmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->utmpi, 0, DT_INT, _state, make_automatic); _sparsematrix_init(&p->sparseludbg, _state, make_automatic); } void _dualsimplexbasis_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { dualsimplexbasis *dst = (dualsimplexbasis*)_dst; dualsimplexbasis *src = (dualsimplexbasis*)_src; dst->ns = src->ns; dst->m = src->m; 
ae_vector_init_copy(&dst->idx, &src->idx, _state, make_automatic); ae_vector_init_copy(&dst->nidx, &src->nidx, _state, make_automatic); ae_vector_init_copy(&dst->isbasic, &src->isbasic, _state, make_automatic); dst->trftype = src->trftype; dst->isvalidtrf = src->isvalidtrf; dst->trfage = src->trfage; ae_matrix_init_copy(&dst->denselu, &src->denselu, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsel, &src->sparsel, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseu, &src->sparseu, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseut, &src->sparseut, _state, make_automatic); ae_vector_init_copy(&dst->rowpermbwd, &src->rowpermbwd, _state, make_automatic); ae_vector_init_copy(&dst->colpermbwd, &src->colpermbwd, _state, make_automatic); ae_vector_init_copy(&dst->densepfieta, &src->densepfieta, _state, make_automatic); ae_vector_init_copy(&dst->densemu, &src->densemu, _state, make_automatic); ae_vector_init_copy(&dst->rk, &src->rk, _state, make_automatic); ae_vector_init_copy(&dst->dk, &src->dk, _state, make_automatic); ae_vector_init_copy(&dst->dseweights, &src->dseweights, _state, make_automatic); dst->dsevalid = src->dsevalid; dst->eminu = src->eminu; ae_vector_init_copy(&dst->wtmp0, &src->wtmp0, _state, make_automatic); ae_vector_init_copy(&dst->wtmp1, &src->wtmp1, _state, make_automatic); ae_vector_init_copy(&dst->wtmp2, &src->wtmp2, _state, make_automatic); ae_vector_init_copy(&dst->nrs, &src->nrs, _state, make_automatic); ae_vector_init_copy(&dst->tcinvidx, &src->tcinvidx, _state, make_automatic); ae_matrix_init_copy(&dst->denselu2, &src->denselu2, _state, make_automatic); ae_vector_init_copy(&dst->densep2, &src->densep2, _state, make_automatic); ae_vector_init_copy(&dst->densep2c, &src->densep2c, _state, make_automatic); _sparsematrix_init_copy(&dst->sparselu1, &src->sparselu1, _state, make_automatic); _sparsematrix_init_copy(&dst->sparselu2, &src->sparselu2, _state, make_automatic); _sluv2buffer_init_copy(&dst->lubuf2, &src->lubuf2, _state, make_automatic); ae_vector_init_copy(&dst->tmpi, &src->tmpi, _state, make_automatic); ae_vector_init_copy(&dst->utmp0, &src->utmp0, _state, make_automatic); ae_vector_init_copy(&dst->utmpi, &src->utmpi, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseludbg, &src->sparseludbg, _state, make_automatic); } void _dualsimplexbasis_clear(void* _p) { dualsimplexbasis *p = (dualsimplexbasis*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->idx); ae_vector_clear(&p->nidx); ae_vector_clear(&p->isbasic); ae_matrix_clear(&p->denselu); _sparsematrix_clear(&p->sparsel); _sparsematrix_clear(&p->sparseu); _sparsematrix_clear(&p->sparseut); ae_vector_clear(&p->rowpermbwd); ae_vector_clear(&p->colpermbwd); ae_vector_clear(&p->densepfieta); ae_vector_clear(&p->densemu); ae_vector_clear(&p->rk); ae_vector_clear(&p->dk); ae_vector_clear(&p->dseweights); ae_vector_clear(&p->wtmp0); ae_vector_clear(&p->wtmp1); ae_vector_clear(&p->wtmp2); ae_vector_clear(&p->nrs); ae_vector_clear(&p->tcinvidx); ae_matrix_clear(&p->denselu2); ae_vector_clear(&p->densep2); ae_vector_clear(&p->densep2c); _sparsematrix_clear(&p->sparselu1); _sparsematrix_clear(&p->sparselu2); _sluv2buffer_clear(&p->lubuf2); ae_vector_clear(&p->tmpi); ae_vector_clear(&p->utmp0); ae_vector_clear(&p->utmpi); _sparsematrix_clear(&p->sparseludbg); } void _dualsimplexbasis_destroy(void* _p) { dualsimplexbasis *p = (dualsimplexbasis*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->idx); ae_vector_destroy(&p->nidx); ae_vector_destroy(&p->isbasic); 
ae_matrix_destroy(&p->denselu); _sparsematrix_destroy(&p->sparsel); _sparsematrix_destroy(&p->sparseu); _sparsematrix_destroy(&p->sparseut); ae_vector_destroy(&p->rowpermbwd); ae_vector_destroy(&p->colpermbwd); ae_vector_destroy(&p->densepfieta); ae_vector_destroy(&p->densemu); ae_vector_destroy(&p->rk); ae_vector_destroy(&p->dk); ae_vector_destroy(&p->dseweights); ae_vector_destroy(&p->wtmp0); ae_vector_destroy(&p->wtmp1); ae_vector_destroy(&p->wtmp2); ae_vector_destroy(&p->nrs); ae_vector_destroy(&p->tcinvidx); ae_matrix_destroy(&p->denselu2); ae_vector_destroy(&p->densep2); ae_vector_destroy(&p->densep2c); _sparsematrix_destroy(&p->sparselu1); _sparsematrix_destroy(&p->sparselu2); _sluv2buffer_destroy(&p->lubuf2); ae_vector_destroy(&p->tmpi); ae_vector_destroy(&p->utmp0); ae_vector_destroy(&p->utmpi); _sparsematrix_destroy(&p->sparseludbg); } void _dualsimplexsubproblem_init(void* _p, ae_state *_state, ae_bool make_automatic) { dualsimplexsubproblem *p = (dualsimplexsubproblem*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->rawc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndt, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->xa, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndlb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndub, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndtb, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->effc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->colscales, 0, DT_REAL, _state, make_automatic); } void _dualsimplexsubproblem_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { dualsimplexsubproblem *dst = (dualsimplexsubproblem*)_dst; dualsimplexsubproblem *src = (dualsimplexsubproblem*)_src; dst->ns = src->ns; dst->m = src->m; ae_vector_init_copy(&dst->rawc, &src->rawc, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_vector_init_copy(&dst->bndt, &src->bndt, _state, make_automatic); ae_vector_init_copy(&dst->xa, &src->xa, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->state = src->state; ae_vector_init_copy(&dst->xb, &src->xb, _state, make_automatic); ae_vector_init_copy(&dst->bndlb, &src->bndlb, _state, make_automatic); ae_vector_init_copy(&dst->bndub, &src->bndub, _state, make_automatic); ae_vector_init_copy(&dst->bndtb, &src->bndtb, _state, make_automatic); ae_vector_init_copy(&dst->effc, &src->effc, _state, make_automatic); ae_vector_init_copy(&dst->colscales, &src->colscales, _state, make_automatic); } void _dualsimplexsubproblem_clear(void* _p) { dualsimplexsubproblem *p = (dualsimplexsubproblem*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->rawc); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->bndt); ae_vector_clear(&p->xa); ae_vector_clear(&p->d); ae_vector_clear(&p->xb); ae_vector_clear(&p->bndlb); ae_vector_clear(&p->bndub); ae_vector_clear(&p->bndtb); ae_vector_clear(&p->effc); ae_vector_clear(&p->colscales); } void _dualsimplexsubproblem_destroy(void* _p) { dualsimplexsubproblem *p = (dualsimplexsubproblem*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->rawc); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); 
ae_vector_destroy(&p->bndt); ae_vector_destroy(&p->xa); ae_vector_destroy(&p->d); ae_vector_destroy(&p->xb); ae_vector_destroy(&p->bndlb); ae_vector_destroy(&p->bndub); ae_vector_destroy(&p->bndtb); ae_vector_destroy(&p->effc); ae_vector_destroy(&p->colscales); } void _dualsimplexstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { dualsimplexstate *p = (dualsimplexstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->varscales, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rowscales, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rawbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rawbndu, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->a, _state, make_automatic); _sparsematrix_init(&p->at, _state, make_automatic); _dualsimplexbasis_init(&p->basis, _state, make_automatic); _dualsimplexsubproblem_init(&p->primary, _state, make_automatic); _dualsimplexsubproblem_init(&p->phase1, _state, make_automatic); _dualsimplexsubproblem_init(&p->phase3, _state, make_automatic); ae_vector_init(&p->repx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->repy, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->repdx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->repstats, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->possibleflips, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->dfctmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dfctmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dfctmp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ustmpi, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->alphar, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rhor, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tau, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->alphaq, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->alphaqim, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->eligibleset, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->harrisset, 0, DT_INT, _state, make_automatic); } void _dualsimplexstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { dualsimplexstate *dst = (dualsimplexstate*)_dst; dualsimplexstate *src = (dualsimplexstate*)_src; ae_vector_init_copy(&dst->varscales, &src->varscales, _state, make_automatic); ae_vector_init_copy(&dst->rowscales, &src->rowscales, _state, make_automatic); ae_vector_init_copy(&dst->rawbndl, &src->rawbndl, _state, make_automatic); ae_vector_init_copy(&dst->rawbndu, &src->rawbndu, _state, make_automatic); dst->ns = src->ns; dst->m = src->m; _sparsematrix_init_copy(&dst->a, &src->a, _state, make_automatic); _sparsematrix_init_copy(&dst->at, &src->at, _state, make_automatic); _dualsimplexbasis_init_copy(&dst->basis, &src->basis, _state, make_automatic); _dualsimplexsubproblem_init_copy(&dst->primary, &src->primary, _state, make_automatic); _dualsimplexsubproblem_init_copy(&dst->phase1, &src->phase1, _state, make_automatic); _dualsimplexsubproblem_init_copy(&dst->phase3, &src->phase3, _state, make_automatic); ae_vector_init_copy(&dst->repx, &src->repx, _state, make_automatic); ae_vector_init_copy(&dst->repy, &src->repy, _state, make_automatic); ae_vector_init_copy(&dst->repdx, &src->repdx, _state, make_automatic); ae_vector_init_copy(&dst->repstats, &src->repstats, _state, 
make_automatic); dst->repf = src->repf; dst->repprimalerror = src->repprimalerror; dst->repdualerror = src->repdualerror; dst->repterminationtype = src->repterminationtype; dst->repiterationscount = src->repiterationscount; dst->repiterationscount1 = src->repiterationscount1; dst->repiterationscount2 = src->repiterationscount2; dst->repiterationscount3 = src->repiterationscount3; ae_vector_init_copy(&dst->possibleflips, &src->possibleflips, _state, make_automatic); dst->possibleflipscnt = src->possibleflipscnt; ae_vector_init_copy(&dst->dfctmp0, &src->dfctmp0, _state, make_automatic); ae_vector_init_copy(&dst->dfctmp1, &src->dfctmp1, _state, make_automatic); ae_vector_init_copy(&dst->dfctmp2, &src->dfctmp2, _state, make_automatic); ae_vector_init_copy(&dst->ustmpi, &src->ustmpi, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_vector_init_copy(&dst->alphar, &src->alphar, _state, make_automatic); ae_vector_init_copy(&dst->rhor, &src->rhor, _state, make_automatic); ae_vector_init_copy(&dst->tau, &src->tau, _state, make_automatic); ae_vector_init_copy(&dst->alphaq, &src->alphaq, _state, make_automatic); ae_vector_init_copy(&dst->alphaqim, &src->alphaqim, _state, make_automatic); ae_vector_init_copy(&dst->eligibleset, &src->eligibleset, _state, make_automatic); ae_vector_init_copy(&dst->harrisset, &src->harrisset, _state, make_automatic); } void _dualsimplexstate_clear(void* _p) { dualsimplexstate *p = (dualsimplexstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->varscales); ae_vector_clear(&p->rowscales); ae_vector_clear(&p->rawbndl); ae_vector_clear(&p->rawbndu); _sparsematrix_clear(&p->a); _sparsematrix_clear(&p->at); _dualsimplexbasis_clear(&p->basis); _dualsimplexsubproblem_clear(&p->primary); _dualsimplexsubproblem_clear(&p->phase1); _dualsimplexsubproblem_clear(&p->phase3); ae_vector_clear(&p->repx); ae_vector_clear(&p->repy); ae_vector_clear(&p->repdx); ae_vector_clear(&p->repstats); ae_vector_clear(&p->possibleflips); ae_vector_clear(&p->dfctmp0); ae_vector_clear(&p->dfctmp1); ae_vector_clear(&p->dfctmp2); ae_vector_clear(&p->ustmpi); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->tmp2); ae_vector_clear(&p->alphar); ae_vector_clear(&p->rhor); ae_vector_clear(&p->tau); ae_vector_clear(&p->alphaq); ae_vector_clear(&p->alphaqim); ae_vector_clear(&p->eligibleset); ae_vector_clear(&p->harrisset); } void _dualsimplexstate_destroy(void* _p) { dualsimplexstate *p = (dualsimplexstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->varscales); ae_vector_destroy(&p->rowscales); ae_vector_destroy(&p->rawbndl); ae_vector_destroy(&p->rawbndu); _sparsematrix_destroy(&p->a); _sparsematrix_destroy(&p->at); _dualsimplexbasis_destroy(&p->basis); _dualsimplexsubproblem_destroy(&p->primary); _dualsimplexsubproblem_destroy(&p->phase1); _dualsimplexsubproblem_destroy(&p->phase3); ae_vector_destroy(&p->repx); ae_vector_destroy(&p->repy); ae_vector_destroy(&p->repdx); ae_vector_destroy(&p->repstats); ae_vector_destroy(&p->possibleflips); ae_vector_destroy(&p->dfctmp0); ae_vector_destroy(&p->dfctmp1); ae_vector_destroy(&p->dfctmp2); ae_vector_destroy(&p->ustmpi); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->tmp2); ae_vector_destroy(&p->alphar); ae_vector_destroy(&p->rhor); ae_vector_destroy(&p->tau); ae_vector_destroy(&p->alphaq); 
ae_vector_destroy(&p->alphaqim); ae_vector_destroy(&p->eligibleset); ae_vector_destroy(&p->harrisset); } #endif #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* LINEAR PROGRAMMING The subroutine creates LP solver. After initial creation it contains default optimization problem with zero cost vector and all variables being fixed to zero values and no constraints. In order to actually solve something you should: * set cost vector with minlpsetcost() * set variable bounds with minlpsetbc() or minlpsetbcall() * specify constraint matrix with one of the following functions: [*] minlpsetlc() for dense one-sided constraints [*] minlpsetlc2dense() for dense two-sided constraints [*] minlpsetlc2() for sparse two-sided constraints [*] minlpaddlc2dense() to add one dense row to constraint matrix [*] minlpaddlc2() to add one row to constraint matrix (compressed format) * call minlpoptimize() to run the solver and minlpresults() to get the solution vector and additional information. Presently this optimizer supports only revised simplex method as underlying solver. DSE pricing and bounds flipping ratio test (aka long dual step) are supported. Large-scale sparse LU solver with Forest-Tomlin is used internally as linear algebra driver. Future releases of ALGLIB may introduce other solvers. INPUT PARAMETERS: N - problem size OUTPUT PARAMETERS: State - optimizer in the default state -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpcreate(ae_int_t n, minlpstate* state, ae_state *_state) { ae_int_t i; _minlpstate_clear(state); ae_assert(n>=1, "MinLPCreate: N<1", _state); /* * Initialize */ state->n = n; state->m = 0; state->algokind = 1; ae_vector_set_length(&state->c, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->xs, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = (double)(0); state->bndu.ptr.p_double[i] = (double)(0); state->c.ptr.p_double[i] = 0.0; state->s.ptr.p_double[i] = 1.0; state->xs.ptr.p_double[i] = 1.0; } minlp_clearreportfields(state, _state); } /************************************************************************* This function sets cost term for LP solver. By default, cost term is zero. INPUT PARAMETERS: State - structure which stores algorithm state C - cost term, array[N]. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetcost(minlpstate* state, /* Real */ ae_vector* c, ae_state *_state) { ae_int_t n; ae_int_t i; n = state->n; ae_assert(c->cnt>=n, "MinLPSetCost: Length(C)<N", _state); for(i=0; i<=n-1; i++) { state->c.ptr.p_double[i] = c->ptr.p_double[i]; } } /************************************************************************* This function sets scaling coefficients. ALGLIB optimizers use scaling matrices to test stopping conditions and as preconditioner. Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter.
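For example, if the first variable typically changes by about 1.0E-6 and the second one by about 1.0E+6, then S=[1.0E-6,1.0E+6] is a reasonable choice. A minimal usage sketch via the C++ wrapper (hypothetical 2-variable problem; exact wrapper signatures are declared in optimization.h):

    alglib::minlpstate state;
    alglib::real_1d_array s = "[1.0e-6,1.0e+6]";
    alglib::minlpcreate(2, state);      // create solver for 2 variables
    alglib::minlpsetscale(state, s);    // pass per-variable scales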
-- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetscale(minlpstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->n, "MinLPSetScale: Length(S)<N", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinLPSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinLPSetScale: S contains zero elements", _state); } for(i=0; i<=state->n-1; i++) { state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* This function sets box constraints for LP solver (all variables at once, different constraints for different variables). The default state of constraints is to have all variables fixed at zero. You have to overwrite it by your own constraint vector. Constraint status is preserved until constraints are explicitly overwritten with another minlpsetbc() call, overwritten with minlpsetbcall(), or partially overwritten with minlpsetbci() call. Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ... free variable - BndL[I]=-INF, BndU[I]=+INF INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. BndU - upper bounds, array[N]. NOTE: infinite values can be specified by means of Double.PositiveInfinity and Double.NegativeInfinity (in C#) and alglib::fp_posinf and alglib::fp_neginf (in C++). NOTE: you may replace infinities by very small/very large values, but it is not recommended because large numbers may introduce large numerical errors in the algorithm. NOTE: if constraints for all variables are same you may use minlpsetbcall() which allows to specify constraints without using arrays. NOTE: BndL>BndU will result in LP problem being recognized as infeasible. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey *************************************************************************/ void minlpsetbc(minlpstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; ae_assert(bndl->cnt>=n, "MinLPSetBC: Length(BndL)<N", _state); ae_assert(bndu->cnt>=n, "MinLPSetBC: Length(BndU)<N", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinLPSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinLPSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; } } /************************************************************************* This function sets box constraints for LP solver (all variables at once, same constraints for all variables) The default state of constraints is to have all variables fixed at zero. You have to overwrite it by your own constraint vector. Constraint status is preserved until constraints are explicitly overwritten with another minlpsetbc() call or partially overwritten with minlpsetbci(). Following types of constraints are supported: DESCRIPTION CONSTRAINT HOW TO SPECIFY fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] lower bound BndL[i]<=x[i] BndU[i]=+INF upper bound x[i]<=BndU[i] BndL[i]=-INF range BndL[i]<=x[i]<=BndU[i] ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    BndL    -   lower bound, same for all variables
    BndU    -   upper bound, same for all variables

NOTE: infinite values can be specified by means of Double.PositiveInfinity
      and Double.NegativeInfinity (in C#) and alglib::fp_posinf and
      alglib::fp_neginf (in C++).

NOTE: you may replace infinities by very small/very large values, but it
      is not recommended because large numbers may introduce large
      numerical errors in the algorithm.

NOTE: minlpsetbc() can be used to specify different constraints for
      different variables.

NOTE: BndL>BndU will result in LP problem being recognized as infeasible.

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpsetbcall(minlpstate* state,
     double bndl,
     double bndu,
     ae_state *_state)
{
    ae_int_t i;
    ae_int_t n;

    n = state->n;
    ae_assert(ae_isfinite(bndl, _state)||ae_isneginf(bndl, _state), "MinLPSetBCAll: BndL is NAN or +INF", _state);
    ae_assert(ae_isfinite(bndu, _state)||ae_isposinf(bndu, _state), "MinLPSetBCAll: BndU is NAN or -INF", _state);
    for(i=0; i<=n-1; i++)
    {
        state->bndl.ptr.p_double[i] = bndl;
        state->bndu.ptr.p_double[i] = bndu;
    }
}


/*************************************************************************
This function sets box constraints for I-th variable (other variables are
not modified).

The default state of constraints is to have all variables fixed at zero.
You have to overwrite it by your own constraint vector.

Following types of constraints are supported:

    DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
    fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
    lower bound         BndL[i]<=x[i]           BndU[i]=+INF
    upper bound         x[i]<=BndU[i]           BndL[i]=-INF
    range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    I       -   variable index, in [0,N)
    BndL    -   lower bound for I-th variable
    BndU    -   upper bound for I-th variable

NOTE: infinite values can be specified by means of Double.PositiveInfinity
      and Double.NegativeInfinity (in C#) and alglib::fp_posinf and
      alglib::fp_neginf (in C++).

NOTE: you may replace infinities by very small/very large values, but it
      is not recommended because large numbers may introduce large
      numerical errors in the algorithm.

NOTE: minlpsetbc() can be used to specify different constraints for
      different variables.

NOTE: BndL>BndU will result in LP problem being recognized as infeasible.

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpsetbci(minlpstate* state,
     ae_int_t i,
     double bndl,
     double bndu,
     ae_state *_state)
{
    ae_int_t n;

    n = state->n;
    ae_assert(i>=0&&i<n, "MinLPSetBCi: I is outside of [0,N)", _state);
    ae_assert(ae_isfinite(bndl, _state)||ae_isneginf(bndl, _state), "MinLPSetBCi: BndL is NAN or +INF", _state);
    ae_assert(ae_isfinite(bndu, _state)||ae_isposinf(bndu, _state), "MinLPSetBCi: BndU is NAN or -INF", _state);
    state->bndl.ptr.p_double[i] = bndl;
    state->bndu.ptr.p_double[i] = bndu;
}


/*************************************************************************
This function sets one-sided linear constraints A*x ~ AU, where "~" can be
a mix of "<=", "=" and ">=".

IMPORTANT: this function is provided here for compatibility with the rest
           of ALGLIB optimizers which accept constraints in format like
           this one. Many real-life problems feature two-sided constraints
           like a0 <= a*x <= a1. It is really inefficient to add them as a
           pair of one-sided constraints. Use minlpsetlc2dense(),
           minlpsetlc2(), minlpaddlc2dense() (or its sparse version
           minlpaddlc2()) wherever possible.

INPUT PARAMETERS:
    State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraints, array[K,N+1].
                Each row of A represents one constraint, with first N
                elements being linear coefficients, and last element being
                right side.
    CT      -   constraint types, array[K]:
                * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n]
                * if CT[i]=0, then I-th constraint is A[i,*]*x  = A[i,n]
                * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n]
    K       -   number of equality/inequality constraints, K>=0; if not
                given, inferred from sizes of A and CT.

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpsetlc(minlpstate* state,
     /* Real */ ae_matrix* a,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state)
{
    ae_frame _frame_block;
    ae_vector al;
    ae_vector au;
    ae_int_t n;
    ae_int_t i;

    ae_frame_make(_state, &_frame_block);
    memset(&al, 0, sizeof(al));
    memset(&au, 0, sizeof(au));
    ae_vector_init(&al, 0, DT_REAL, _state, ae_true);
    ae_vector_init(&au, 0, DT_REAL, _state, ae_true);

    n = state->n;
    ae_assert(k>=0, "MinLPSetLC: K<0", _state);
    ae_assert(k==0||a->cols>=n+1, "MinLPSetLC: Cols(A)<N+1", _state);
    ae_assert(a->rows>=k, "MinLPSetLC: Rows(A)<K", _state);
    ae_assert(ct->cnt>=k, "MinLPSetLC: Length(CT)<K", _state);

    /*
     * Nothing to do if K=0
     */
    if( k==0 )
    {
        state->m = 0;
        ae_frame_leave(_state);
        return;
    }

    /*
     * Convert constraints to two-sided storage format, call another function
     */
    ae_vector_set_length(&al, k, _state);
    ae_vector_set_length(&au, k, _state);
    for(i=0; i<=k-1; i++)
    {
        if( ct->ptr.p_int[i]>0 )
        {
            al.ptr.p_double[i] = a->ptr.pp_double[i][n];
            au.ptr.p_double[i] = _state->v_posinf;
            continue;
        }
        if( ct->ptr.p_int[i]<0 )
        {
            al.ptr.p_double[i] = _state->v_neginf;
            au.ptr.p_double[i] = a->ptr.pp_double[i][n];
            continue;
        }
        al.ptr.p_double[i] = a->ptr.pp_double[i][n];
        au.ptr.p_double[i] = a->ptr.pp_double[i][n];
    }
    minlpsetlc2dense(state, a, &al, &au, k, _state);
    ae_frame_leave(_state);
}


/*************************************************************************
This function sets two-sided linear constraints AL <= A*x <= AU.

This version accepts dense matrix as input; internally LP solver uses
sparse storage anyway (most LP problems are sparse), but for your
convenience it may accept dense inputs. This function overwrites linear
constraints set by previous calls (if such calls were made).

We recommend you to use the sparse version of this function unless you
solve a small-scale LP problem (less than a few hundred variables).

NOTE: there also exist several versions of this function:
      * one-sided dense version which accepts constraints in the same
        format as one used by QP and NLP solvers
      * two-sided sparse version which accepts sparse matrix
      * two-sided dense version which allows you to add constraints row by row
      * two-sided sparse version which allows you to add constraints row by row

INPUT PARAMETERS:
    State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraints, array[K,N]. Each row of A represents
                one constraint. One-sided inequality constraints, two-sided
                inequality constraints, equality constraints are supported
                (see below)
    AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x
                * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
                * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
                * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
                * AL[i]=-INF, AU[i]=+INF => constraint is ignored
    K       -   number of equality/inequality constraints, K>=0; if not
                given, inferred from sizes of A, AL, AU.
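NOTE: a short illustrative sketch, written against the C++ wrapper layer
      (assuming the standard alglib::minlpsetlc2dense wrapper declared in
      optimization.h; the numbers are made up):

          // two constraints on two variables:
          //     x0 +   x1  = 1     (AL=AU=1)
          //     x0 - 2*x1 <= 3     (AL=-INF, AU=3)
          alglib::real_2d_array a = "[[1,1],[1,-2]]";
          alglib::real_1d_array al, au;
          al.setlength(2);
          au.setlength(2);
          al[0] = 1.0;               au[0] = 1.0;
          al[1] = alglib::fp_neginf; au[1] = 3.0;
          alglib::minlpsetlc2dense(state, a, al, au, 2);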
  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpsetlc2dense(minlpstate* state,
     /* Real */ ae_matrix* a,
     /* Real */ ae_vector* al,
     /* Real */ ae_vector* au,
     ae_int_t k,
     ae_state *_state)
{
    ae_frame _frame_block;
    ae_int_t i;
    ae_int_t j;
    ae_int_t n;
    ae_int_t nz;
    ae_vector nrs;

    ae_frame_make(_state, &_frame_block);
    memset(&nrs, 0, sizeof(nrs));
    ae_vector_init(&nrs, 0, DT_INT, _state, ae_true);

    n = state->n;
    ae_assert(k>=0, "MinLPSetLC2Dense: K<0", _state);
    ae_assert(k==0||a->cols>=n, "MinLPSetLC2Dense: Cols(A)<N", _state);
    ae_assert(a->rows>=k, "MinLPSetLC2Dense: Rows(A)<K", _state);
    ae_assert(al->cnt>=k, "MinLPSetLC2Dense: Length(AL)<K", _state);
    ae_assert(au->cnt>=k, "MinLPSetLC2Dense: Length(AU)<K", _state);
    ivectorsetlengthatleast(&nrs, k, _state);
    state->m = k;
    if( state->m==0 )
    {
        ae_frame_leave(_state);
        return;
    }
    for(i=0; i<=k-1; i++)
    {
        ae_assert(ae_isfinite(al->ptr.p_double[i], _state)||ae_isneginf(al->ptr.p_double[i], _state), "MinLPSetLC2Dense: AL contains NAN or +INF", _state);
        ae_assert(ae_isfinite(au->ptr.p_double[i], _state)||ae_isposinf(au->ptr.p_double[i], _state), "MinLPSetLC2Dense: AU contains NAN or -INF", _state);
        nz = 0;
        for(j=0; j<=n-1; j++)
        {
            if( ae_fp_neq(a->ptr.pp_double[i][j],(double)(0)) )
            {
                inc(&nz, _state);
            }
        }
        nrs.ptr.p_int[i] = nz;
    }

    /*
     * Allocate storage, copy
     */
    rvectorsetlengthatleast(&state->al, state->m, _state);
    rvectorsetlengthatleast(&state->au, state->m, _state);
    sparsecreatecrsbuf(state->m, n, &nrs, &state->a, _state);
    for(i=0; i<=k-1; i++)
    {
        for(j=0; j<=n-1; j++)
        {
            if( ae_fp_neq(a->ptr.pp_double[i][j],(double)(0)) )
            {
                sparseset(&state->a, i, j, a->ptr.pp_double[i][j], _state);
            }
        }
        state->al.ptr.p_double[i] = al->ptr.p_double[i];
        state->au.ptr.p_double[i] = au->ptr.p_double[i];
    }
    ae_frame_leave(_state);
}


/*************************************************************************
This function sets two-sided linear constraints AL <= A*x <= AU with a
sparse constraining matrix A. Recommended for large-scale problems.

This function overwrites linear (non-box) constraints set by previous
calls (if such calls were made).

INPUT PARAMETERS:
    State   -   structure previously allocated with minlpcreate() call.
    A       -   sparse matrix with size [K,N] (exactly!). Each row of A
                represents one general linear constraint. A can be stored
                in any sparse storage format.
    AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x
                * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
                * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
                * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
                * AL[i]=-INF, AU[i]=+INF => constraint is ignored
    K       -   number of equality/inequality constraints, K>=0. If K=0 is
                specified, A, AL, AU are ignored.
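NOTE: a short illustrative sketch, written against the C++ wrapper layer
      (assuming the standard alglib::sparsecreate/sparseset helpers and
      the alglib::minlpsetlc2 wrapper; the numbers are made up):

          // one sparse row over 3 variables:  2*x0 + x2 <= 5
          alglib::sparsematrix a;
          alglib::sparsecreate(1, 3, a);
          alglib::sparseset(a, 0, 0, 2.0);
          alglib::sparseset(a, 0, 2, 1.0);
          alglib::real_1d_array al, au;
          al.setlength(1);
          au.setlength(1);
          al[0] = alglib::fp_neginf;
          au[0] = 5.0;
          alglib::minlpsetlc2(state, a, al, au, 1);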
  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpsetlc2(minlpstate* state,
     sparsematrix* a,
     /* Real */ ae_vector* al,
     /* Real */ ae_vector* au,
     ae_int_t k,
     ae_state *_state)
{
    ae_int_t n;
    ae_int_t i;

    n = state->n;

    /*
     * Quick exit
     */
    if( k==0 )
    {
        state->m = 0;
        return;
    }

    /*
     * Integrity checks
     */
    ae_assert(k>0, "MinLPSetLC2: K<0", _state);
    ae_assert(sparsegetncols(a, _state)==n, "MinLPSetLC2: Cols(A)<>N", _state);
    ae_assert(sparsegetnrows(a, _state)==k, "MinLPSetLC2: Rows(A)<>K", _state);
    ae_assert(al->cnt>=k, "MinLPSetLC2: Length(AL)<K", _state);
    ae_assert(au->cnt>=k, "MinLPSetLC2: Length(AU)<K", _state);
    for(i=0; i<=k-1; i++)
    {
        ae_assert(ae_isfinite(al->ptr.p_double[i], _state)||ae_isneginf(al->ptr.p_double[i], _state), "MinLPSetLC2: AL contains NAN or +INF", _state);
        ae_assert(ae_isfinite(au->ptr.p_double[i], _state)||ae_isposinf(au->ptr.p_double[i], _state), "MinLPSetLC2: AU contains NAN or -INF", _state);
    }

    /*
     * Copy
     */
    state->m = k;
    sparsecopytocrsbuf(a, &state->a, _state);
    rvectorsetlengthatleast(&state->al, k, _state);
    rvectorsetlengthatleast(&state->au, k, _state);
    for(i=0; i<=k-1; i++)
    {
        state->al.ptr.p_double[i] = al->ptr.p_double[i];
        state->au.ptr.p_double[i] = au->ptr.p_double[i];
    }
}


/*************************************************************************
This function appends two-sided linear constraint AL <= A*x <= AU to the
list of currently present constraints.

This version accepts dense constraint vector as input, but sparsifies it
for internal storage and processing. Thus, time to add one constraint is
O(N) - we have to scan entire array of length N. Sparse version of this
function is order of magnitude faster for constraints with just a few
nonzeros per row.

INPUT PARAMETERS:
    State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraint coefficient, array[N], right side is NOT
                included.
    AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint Ai*x
                * AL<AU    => two-sided constraint AL<=A*x<=AU
                * AL=-INF  => one-sided constraint Ai*x<=AU
                * AU=+INF  => one-sided constraint AL<=Ai*x
                * AL=-INF, AU=+INF => constraint is ignored

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpaddlc2dense(minlpstate* state,
     /* Real */ ae_vector* a,
     double al,
     double au,
     ae_state *_state)
{
    ae_int_t i;
    ae_int_t n;
    ae_int_t nnz;

    n = state->n;
    ae_assert(a->cnt>=n, "MinLPAddLC2Dense: Length(A)<N", _state);
    ivectorsetlengthatleast(&state->adddtmpi, n, _state);
    rvectorsetlengthatleast(&state->adddtmpr, n, _state);
    nnz = 0;
    for(i=0; i<=n-1; i++)
    {
        if( a->ptr.p_double[i]!=0.0 )
        {
            state->adddtmpi.ptr.p_int[nnz] = i;
            state->adddtmpr.ptr.p_double[nnz] = a->ptr.p_double[i];
            nnz = nnz+1;
        }
    }
    minlpaddlc2(state, &state->adddtmpi, &state->adddtmpr, nnz, al, au, _state);
}


/*************************************************************************
This function appends two-sided linear constraint AL <= A*x <= AU to the
list of currently present constraints.

Constraint is passed in compressed format: as a list of non-zero entries
of the coefficient vector A. Such an approach is more efficient than dense
storage for highly sparse constraint vectors.

INPUT PARAMETERS:
    State   -   structure previously allocated with minlpcreate() call.
    IdxA    -   array[NNZ], indexes of non-zero elements of A:
                * can be unsorted
                * can include duplicate indexes (corresponding entries of
                  ValA[] will be summed)
    ValA    -   array[NNZ], values of non-zero elements of A
    NNZ     -   number of non-zero coefficients in A
    AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint A*x
                * AL<AU    => two-sided constraint AL<=A*x<=AU
                * AL=-INF  => one-sided constraint A*x<=AU
                * AU=+INF  => one-sided constraint AL<=A*x
                * AL=-INF, AU=+INF => constraint is ignored

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey
*************************************************************************/
void minlpaddlc2(minlpstate* state,
     /* Integer */ ae_vector* idxa,
     /* Real */ ae_vector* vala,
     ae_int_t nnz,
     double al,
     double au,
     ae_state *_state)
{
    ae_int_t i;
    ae_int_t j;
    ae_int_t k;
    ae_int_t offs;
    ae_int_t offsdst;
    ae_int_t m;
    ae_int_t n;
    ae_int_t didx;
    ae_int_t uidx;

    m = state->m;
    n = state->n;

    /*
     * Check inputs
     */
    ae_assert(nnz>=0, "MinLPAddLC2: NNZ<0", _state);
    ae_assert(idxa->cnt>=nnz, "MinLPAddLC2: Length(IdxA)<NNZ", _state);
    ae_assert(vala->cnt>=nnz, "MinLPAddLC2: Length(ValA)<NNZ", _state);
    for(i=0; i<=nnz-1; i++)
    {
        ae_assert(idxa->ptr.p_int[i]>=0&&idxa->ptr.p_int[i]<n, "MinLPAddLC2: IdxA contains indexes outside of [0,N) range", _state);
    }

    /*
     * If M=0, the constraint matrix A is not initialized yet; prepare an
     * empty CRS matrix before appending the first row.
     */
    if( m==0 )
    {
        state->a.matrixtype = 1;
        state->a.m = 0;
        state->a.n = n;
        state->a.ninitialized = 0;
        ivectorsetlengthatleast(&state->a.ridx, 1, _state);
        state->a.ridx.ptr.p_int[0] = 0;
    }

    /*
     * Reallocate storage
     */
    offs = state->a.ridx.ptr.p_int[m];
    ivectorgrowto(&state->a.idx, offs+nnz, _state);
    rvectorgrowto(&state->a.vals, offs+nnz, _state);
    ivectorgrowto(&state->a.didx, m+1, _state);
    ivectorgrowto(&state->a.uidx, m+1, _state);
    ivectorgrowto(&state->a.ridx, m+2, _state);
    rvectorgrowto(&state->al, m+1, _state);
    rvectorgrowto(&state->au, m+1, _state);

    /*
     * If NNZ=0, perform quick and simple row append.
     */
    if( nnz==0 )
    {
        state->a.didx.ptr.p_int[m] = state->a.ridx.ptr.p_int[m];
        state->a.uidx.ptr.p_int[m] = state->a.ridx.ptr.p_int[m];
        state->a.ridx.ptr.p_int[m+1] = state->a.ridx.ptr.p_int[m];
        state->al.ptr.p_double[m] = al;
        state->au.ptr.p_double[m] = au;
        state->a.m = m+1;
        state->m = m+1;
        return;
    }

    /*
     * Now we are sure that A contains properly initialized sparse
     * matrix (or some appropriate dummy for M=0) and we have NNZ>0
     * (no need to care about degenerate cases).
     *
     * Append rows to A:
     * * append data
     * * sort in place
     * * merge duplicate indexes
     * * compute DIdx and UIdx
     *
     */
    for(i=0; i<=nnz-1; i++)
    {
        state->a.idx.ptr.p_int[offs+i] = idxa->ptr.p_int[i];
        state->a.vals.ptr.p_double[offs+i] = vala->ptr.p_double[i];
    }
    tagsortmiddleir(&state->a.idx, &state->a.vals, offs, nnz, _state);
    offsdst = offs;
    for(i=1; i<=nnz-1; i++)
    {
        if( state->a.idx.ptr.p_int[offsdst]!=state->a.idx.ptr.p_int[offs+i] )
        {
            offsdst = offsdst+1;
            state->a.idx.ptr.p_int[offsdst] = state->a.idx.ptr.p_int[offs+i];
            state->a.vals.ptr.p_double[offsdst] = state->a.vals.ptr.p_double[offs+i];
        }
        else
        {
            state->a.vals.ptr.p_double[offsdst] = state->a.vals.ptr.p_double[offsdst]+state->a.vals.ptr.p_double[offs+i];
        }
    }
    nnz = offsdst-offs+1;
    uidx = -1;
    didx = -1;
    for(j=offs; j<=offsdst; j++)
    {
        k = state->a.idx.ptr.p_int[j];
        if( k==m )
        {
            didx = j;
        }
        else
        {
            if( k>m&&uidx==-1 )
            {
                uidx = j;
                break;
            }
        }
    }
    if( uidx==-1 )
    {
        uidx = offsdst+1;
    }
    if( didx==-1 )
    {
        didx = uidx;
    }
    state->a.didx.ptr.p_int[m] = didx;
    state->a.uidx.ptr.p_int[m] = uidx;
    state->a.ridx.ptr.p_int[m+1] = offsdst+1;
    state->a.m = m+1;
    state->al.ptr.p_double[m] = al;
    state->au.ptr.p_double[m] = au;
    state->m = m+1;
}


/*************************************************************************
This function solves LP problem.
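NOTE: a compact end-to-end sketch of the intended calling sequence, using
      the C++ wrapper layer (assuming the standard wrappers declared in
      optimization.h; the tiny problem below is made up for illustration
      and is not part of the library):

          // minimize -x0-x1  subject to  0<=x0<=1, 0<=x1<=1, x0+x1<=1.5
          alglib::minlpstate  state;
          alglib::minlpreport rep;
          alglib::real_1d_array c = "[-1,-1]";
          alglib::real_1d_array a = "[1,1]";
          alglib::real_1d_array x;
          alglib::minlpcreate(2, state);
          alglib::minlpsetcost(state, c);
          alglib::minlpsetbcall(state, 0.0, 1.0);
          alglib::minlpaddlc2dense(state, a, alglib::fp_neginf, 1.5);
          alglib::minlpoptimize(state);
          alglib::minlpresults(state, x, rep);   // rep.terminationtype>0 on success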
INPUT PARAMETERS: State - algorithm state You should use minlpresults() function to access results after calls to this function. -- ALGLIB -- Copyright 19.07.2018 by Bochkanov Sergey. *************************************************************************/ void minlpoptimize(minlpstate* state, ae_state *_state) { ae_frame _frame_block; ae_int_t n; ae_int_t m; ae_int_t i; dualsimplexsettings settings; ae_matrix dummy; dualsimplexbasis dummybasis; ae_frame_make(_state, &_frame_block); memset(&settings, 0, sizeof(settings)); memset(&dummy, 0, sizeof(dummy)); memset(&dummybasis, 0, sizeof(dummybasis)); _dualsimplexsettings_init(&settings, _state, ae_true); ae_matrix_init(&dummy, 0, 0, DT_REAL, _state, ae_true); _dualsimplexbasis_init(&dummybasis, _state, ae_true); n = state->n; m = state->m; minlp_clearreportfields(state, _state); /* * Most basic check for correctness of constraints */ for(i=0; i<=n-1; i++) { if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->repterminationtype = -3; ae_frame_leave(_state); return; } } for(i=0; i<=m-1; i++) { if( ae_fp_greater(state->al.ptr.p_double[i],state->au.ptr.p_double[i]) ) { state->repterminationtype = -3; ae_frame_leave(_state); return; } } /* * Call current solver */ if( state->algokind==1 ) { /* * Dual simplex method */ dsssettingsinit(&settings, _state); dssinit(n, &state->dss, _state); dsssetproblem(&state->dss, &state->c, &state->bndl, &state->bndu, &state->s, &dummy, &state->a, 1, &state->al, &state->au, m, &dummybasis, minlp_alllogicalsbasis, &settings, _state); dssoptimize(&state->dss, &settings, _state); /* * Export results */ rvectorsetlengthatleast(&state->xs, n, _state); rvectorsetlengthatleast(&state->ys, m, _state); ivectorsetlengthatleast(&state->cs, n+m, _state); for(i=0; i<=n-1; i++) { state->xs.ptr.p_double[i] = state->dss.repx.ptr.p_double[i]; } for(i=0; i<=m-1; i++) { state->ys.ptr.p_double[i] = state->dss.repy.ptr.p_double[i]; } for(i=0; i<=n+m-1; i++) { state->cs.ptr.p_int[i] = state->dss.repstats.ptr.p_int[i]; } state->repf = state->dss.repf; state->repprimalerror = state->dss.repprimalerror; state->repdualerror = state->dss.repdualerror; state->repiterationscount = state->dss.repiterationscount; state->repterminationtype = state->dss.repterminationtype; ae_frame_leave(_state); return; } /* * Integrity check failed - unknown solver */ ae_assert(ae_false, "MinQPOptimize: integrity check failed - unknown solver", _state); ae_frame_leave(_state); } /************************************************************************* LP solver results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[N], solution. Filled by zeros on failure. Rep - optimization report. You should check Rep.TerminationType, which contains completion code, and you may check another fields which contain another information about algorithm functioning. 
Failure codes returned by algorithm are:
* -4    LP problem is primal unbounded (dual infeasible)
* -3    LP problem is primal infeasible (dual unbounded)

Success codes:
*  1..4 successful completion
*  5    MaxIts steps were taken

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minlpresults(minlpstate* state,
     /* Real */ ae_vector* x,
     minlpreport* rep,
     ae_state *_state)
{
    ae_vector_clear(x);
    _minlpreport_clear(rep);

    minlpresultsbuf(state, x, rep, _state);
}


/*************************************************************************
LP results

Buffered implementation of MinLPResults() which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minlpresultsbuf(minlpstate* state,
     /* Real */ ae_vector* x,
     minlpreport* rep,
     ae_state *_state)
{
    ae_int_t i;

    if( x->cnt<state->n )
    {
        ae_vector_set_length(x, state->n, _state);
    }
    ae_vector_set_length(&rep->y, state->m, _state);
    ae_vector_set_length(&rep->stats, state->n+state->m, _state);
    rep->f = state->repf;
    rep->primalerror = state->repprimalerror;
    rep->dualerror = state->repdualerror;
    rep->iterationscount = state->repiterationscount;
    rep->terminationtype = state->repterminationtype;
    if( state->repterminationtype>0 )
    {
        for(i=0; i<=state->n-1; i++)
        {
            x->ptr.p_double[i] = state->xs.ptr.p_double[i];
        }
        for(i=0; i<=state->m-1; i++)
        {
            rep->y.ptr.p_double[i] = state->ys.ptr.p_double[i];
        }
        for(i=0; i<=state->n+state->m-1; i++)
        {
            rep->stats.ptr.p_int[i] = state->cs.ptr.p_int[i];
        }
    }
    else
    {
        for(i=0; i<=state->n-1; i++)
        {
            x->ptr.p_double[i] = (double)(0);
        }
        for(i=0; i<=state->m-1; i++)
        {
            rep->y.ptr.p_double[i] = (double)(0);
        }
        for(i=0; i<=state->n+state->m-1; i++)
        {
            rep->stats.ptr.p_int[i] = 0;
        }
    }
}


/*************************************************************************
Clear report fields prior to the optimization.

  -- ALGLIB --
     Copyright 19.07.2018 by Bochkanov Sergey.
*************************************************************************/ static void minlp_clearreportfields(minlpstate* state, ae_state *_state) { state->repf = 0.0; state->repprimalerror = 0.0; state->repdualerror = 0.0; state->repiterationscount = 0; state->repterminationtype = 0; } void _minlpstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minlpstate *p = (minlpstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->c, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->a, _state, make_automatic); ae_vector_init(&p->al, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->au, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ys, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cs, 0, DT_INT, _state, make_automatic); _dualsimplexstate_init(&p->dss, _state, make_automatic); ae_vector_init(&p->adddtmpi, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->adddtmpr, 0, DT_REAL, _state, make_automatic); } void _minlpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minlpstate *dst = (minlpstate*)_dst; minlpstate *src = (minlpstate*)_src; dst->n = src->n; dst->algokind = src->algokind; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_vector_init_copy(&dst->c, &src->c, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); dst->m = src->m; _sparsematrix_init_copy(&dst->a, &src->a, _state, make_automatic); ae_vector_init_copy(&dst->al, &src->al, _state, make_automatic); ae_vector_init_copy(&dst->au, &src->au, _state, make_automatic); ae_vector_init_copy(&dst->xs, &src->xs, _state, make_automatic); ae_vector_init_copy(&dst->ys, &src->ys, _state, make_automatic); ae_vector_init_copy(&dst->cs, &src->cs, _state, make_automatic); dst->repf = src->repf; dst->repprimalerror = src->repprimalerror; dst->repdualerror = src->repdualerror; dst->repiterationscount = src->repiterationscount; dst->repterminationtype = src->repterminationtype; _dualsimplexstate_init_copy(&dst->dss, &src->dss, _state, make_automatic); ae_vector_init_copy(&dst->adddtmpi, &src->adddtmpi, _state, make_automatic); ae_vector_init_copy(&dst->adddtmpr, &src->adddtmpr, _state, make_automatic); } void _minlpstate_clear(void* _p) { minlpstate *p = (minlpstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_vector_clear(&p->c); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); _sparsematrix_clear(&p->a); ae_vector_clear(&p->al); ae_vector_clear(&p->au); ae_vector_clear(&p->xs); ae_vector_clear(&p->ys); ae_vector_clear(&p->cs); _dualsimplexstate_clear(&p->dss); ae_vector_clear(&p->adddtmpi); ae_vector_clear(&p->adddtmpr); } void _minlpstate_destroy(void* _p) { minlpstate *p = (minlpstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_vector_destroy(&p->c); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); _sparsematrix_destroy(&p->a); ae_vector_destroy(&p->al); ae_vector_destroy(&p->au); ae_vector_destroy(&p->xs); ae_vector_destroy(&p->ys); ae_vector_destroy(&p->cs); _dualsimplexstate_destroy(&p->dss); ae_vector_destroy(&p->adddtmpi); ae_vector_destroy(&p->adddtmpr); } void _minlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minlpreport *p = 
(minlpreport*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->y, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stats, 0, DT_INT, _state, make_automatic); } void _minlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minlpreport *dst = (minlpreport*)_dst; minlpreport *src = (minlpreport*)_src; dst->f = src->f; ae_vector_init_copy(&dst->y, &src->y, _state, make_automatic); ae_vector_init_copy(&dst->stats, &src->stats, _state, make_automatic); dst->primalerror = src->primalerror; dst->dualerror = src->dualerror; dst->iterationscount = src->iterationscount; dst->terminationtype = src->terminationtype; } void _minlpreport_clear(void* _p) { minlpreport *p = (minlpreport*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->y); ae_vector_clear(&p->stats); } void _minlpreport_destroy(void* _p) { minlpreport *p = (minlpreport*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->y); ae_vector_destroy(&p->stats); } #endif #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD) void minslpinitbuf(/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_vector* s, /* Real */ ae_vector* x0, ae_int_t n, /* Real */ ae_matrix* cleic, /* Integer */ ae_vector* lcsrcidx, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, double epsx, ae_int_t maxits, minslpstate* state, ae_state *_state) { ae_int_t nslack; ae_int_t i; ae_int_t j; double v; double vv; nslack = n+2*(nec+nlec)+(nic+nlic); state->n = n; state->nec = nec; state->nic = nic; state->nlec = nlec; state->nlic = nlic; /* * Settings */ state->hessiantype = 2; /* * Prepare RCOMM state */ ae_vector_set_length(&state->rstate.ia, 9+1, _state); ae_vector_set_length(&state->rstate.ba, 2+1, _state); ae_vector_set_length(&state->rstate.ra, 10+1, _state); state->rstate.stage = -1; state->needfij = ae_false; state->xupdated = ae_false; ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->fi, 1+nlec+nlic, _state); ae_matrix_set_length(&state->j, 1+nlec+nlic, n, _state); /* * Allocate memory. 
*/ rvectorsetlengthatleast(&state->s, n, _state); rvectorsetlengthatleast(&state->step0x, n, _state); rvectorsetlengthatleast(&state->stepkx, n, _state); rvectorsetlengthatleast(&state->backupx, n, _state); rvectorsetlengthatleast(&state->step0fi, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->stepkfi, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->backupfi, 1+nlec+nlic, _state); rmatrixsetlengthatleast(&state->step0j, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&state->stepkj, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&state->backupj, 1+nlec+nlic, n, _state); rvectorsetlengthatleast(&state->fscales, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state->meritlagmult, nec+nic+nlec+nlic, _state); rvectorsetlengthatleast(&state->dummylagmult, nec+nic+nlec+nlic, _state); bvectorsetlengthatleast(&state->hasbndl, n, _state); bvectorsetlengthatleast(&state->hasbndu, n, _state); rvectorsetlengthatleast(&state->scaledbndl, n, _state); rvectorsetlengthatleast(&state->scaledbndu, n, _state); rmatrixsetlengthatleast(&state->scaledcleic, nec+nic, n+1, _state); ivectorsetlengthatleast(&state->lcsrcidx, nec+nic, _state); rvectorsetlengthatleast(&state->meritfunctionhistory, nlcslp_nonmonotonicphase2limit+1, _state); /* * Prepare scaled problem */ for(i=0; i<=n-1; i++) { state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); if( state->hasbndl.ptr.p_bool[i] ) { state->scaledbndl.ptr.p_double[i] = bndl->ptr.p_double[i]/s->ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { state->scaledbndu.ptr.p_double[i] = bndu->ptr.p_double[i]/s->ptr.p_double[i]; } if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]), "SLP: integrity check failed, box constraints are inconsistent", _state); } state->step0x.ptr.p_double[i] = x0->ptr.p_double[i]/s->ptr.p_double[i]; state->s.ptr.p_double[i] = s->ptr.p_double[i]; } for(i=0; i<=nec+nic-1; i++) { /* * Permutation */ state->lcsrcidx.ptr.p_int[i] = lcsrcidx->ptr.p_int[i]; /* * Scale and normalize linear constraints */ vv = 0.0; for(j=0; j<=n-1; j++) { v = cleic->ptr.pp_double[i][j]*s->ptr.p_double[j]; state->scaledcleic.ptr.pp_double[i][j] = v; vv = vv+v*v; } vv = ae_sqrt(vv, _state); state->scaledcleic.ptr.pp_double[i][n] = cleic->ptr.pp_double[i][n]; if( ae_fp_greater(vv,(double)(0)) ) { for(j=0; j<=n; j++) { state->scaledcleic.ptr.pp_double[i][j] = state->scaledcleic.ptr.pp_double[i][j]/vv; } } } /* * Initial enforcement of box constraints */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state->step0x.ptr.p_double[i] = ae_maxreal(state->step0x.ptr.p_double[i], state->scaledbndl.ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { state->step0x.ptr.p_double[i] = ae_minreal(state->step0x.ptr.p_double[i], state->scaledbndu.ptr.p_double[i], _state); } } /* * Stopping criteria */ state->epsx = epsx; state->maxits = maxits; /* * Report fields */ state->repsimplexiterations = 0; state->repsimplexiterations1 = 0; state->repsimplexiterations2 = 0; state->repsimplexiterations3 = 0; state->repterminationtype = 0; state->repbcerr = (double)(0); state->repbcidx = -1; state->replcerr = (double)(0); state->replcidx = -1; state->repnlcerr = (double)(0); state->repnlcidx = -1; state->repinneriterationscount = 0; state->repouteriterationscount = 0; /* * Integrity checks: * * it is important that significant step length is large enough that * we do not decrease 
trust regiod radius; it should also be small, * so we won't treat large steps as insignificant */ ae_assert(ae_fp_less(nlcslp_slpstpclosetozero,nlcslp_slpdeltadecrease), "MinSLP: integrity check failed", _state); ae_assert(ae_fp_less(nlcslp_slpdeltadecrease,nlcslp_slpdeltaincrease), "MinSLP: integrity check failed", _state); ae_assert(ae_fp_less(nlcslp_slpdeltaincrease,nlcslp_slpstpclosetoone), "MinSLP: integrity check failed", _state); } /************************************************************************* This function performs actual processing for SLP algorithm. It expects that caller redirects its reverse communication requests NeedFiJ/XUpdated to external user who will provide analytic derivative (or handle reports about progress). In case external user does not have analytic derivative, it is responsibility of caller to intercept NeedFiJ request and replace it with appropriate numerical differentiation scheme. Results are stored: * point - in State.StepKX IMPORTANT: this function works with scaled problem formulation; it is responsibility of the caller to unscale request and scale Jacobian. NOTE: SMonitor is expected to be correctly initialized smoothness monitor. -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ ae_bool minslpiteration(minslpstate* state, smoothnessmonitor* smonitor, ae_bool userterminationneeded, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t i; ae_int_t j; ae_int_t innerk; double v; double vv; double mx; ae_bool lpstagesuccess; double gammamax; double f1; double f2; ae_int_t status; double stp; double deltamax; double multiplyby; double setscaleto; double prevtrustrad; ae_bool dotrace; ae_bool dodetailedtrace; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; nslack = state->rstate.ia.ptr.p_int[1]; nec = state->rstate.ia.ptr.p_int[2]; nic = state->rstate.ia.ptr.p_int[3]; nlec = state->rstate.ia.ptr.p_int[4]; nlic = state->rstate.ia.ptr.p_int[5]; i = state->rstate.ia.ptr.p_int[6]; j = state->rstate.ia.ptr.p_int[7]; innerk = state->rstate.ia.ptr.p_int[8]; status = state->rstate.ia.ptr.p_int[9]; lpstagesuccess = state->rstate.ba.ptr.p_bool[0]; dotrace = state->rstate.ba.ptr.p_bool[1]; dodetailedtrace = state->rstate.ba.ptr.p_bool[2]; v = state->rstate.ra.ptr.p_double[0]; vv = state->rstate.ra.ptr.p_double[1]; mx = state->rstate.ra.ptr.p_double[2]; gammamax = state->rstate.ra.ptr.p_double[3]; f1 = state->rstate.ra.ptr.p_double[4]; f2 = state->rstate.ra.ptr.p_double[5]; stp = state->rstate.ra.ptr.p_double[6]; deltamax = state->rstate.ra.ptr.p_double[7]; multiplyby = state->rstate.ra.ptr.p_double[8]; setscaleto = state->rstate.ra.ptr.p_double[9]; prevtrustrad = state->rstate.ra.ptr.p_double[10]; } else { n = 359; nslack = -58; nec = -919; nic = -909; nlec = 81; nlic = 255; i = 74; j = -788; innerk = 809; status = 205; lpstagesuccess = ae_false; dotrace = ae_true; dodetailedtrace = ae_false; v = 763; vv = -541; mx = -698; gammamax = -900; f1 = -318; f2 = -940; stp = 1016; deltamax = -229; multiplyby = -536; setscaleto = 487; prevtrustrad = -115; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } /* * Routine body */ n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); dotrace = ae_is_trace_enabled("SLP"); dodetailedtrace = dotrace&&ae_is_trace_enabled("SLP.DETAILED"); /* * Prepare rcomm interface */ state->needfij = ae_false; state->xupdated = ae_false; /* * Initialize algorithm data: * * Lagrangian and "Big C" estimates * * trust region * * initial function scales (vector of 1's) * * current approximation of the Hessian matrix H (unit matrix) * * initial linearized constraints * * initial violation of linear/nonlinear constraints */ state->lpfailurecnt = 0; state->fstagnationcnt = 0; state->trustrad = nlcslp_inittrustrad; for(i=0; i<=nlec+nlic; i++) { state->fscales.ptr.p_double[i] = 1.0; } for(i=0; i<=nlcslp_nonmonotonicphase2limit; i++) { state->meritfunctionhistory.ptr.p_double[i] = ae_maxrealnumber; } state->historylen = 0; gammamax = 0.0; /* * Avoid spurious warnings about possibly uninitialized vars */ status = 0; stp = (double)(0); /* * Evaluate function vector and Jacobian at Step0X, send first location report. * Compute initial violation of constraints. */ nlcslp_slpsendx(state, &state->step0x, _state); state->needfij = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state->step0fi, &state->step0j, _state) ) { /* * Failed to retrieve function/Jaconian, infinities detected! 
*/ for(i=0; i<=n-1; i++) { state->stepkx.ptr.p_double[i] = state->step0x.ptr.p_double[i]; } state->repterminationtype = -8; result = ae_false; return result; } nlcslp_slpcopystate(state, &state->step0x, &state->step0fi, &state->step0j, &state->stepkx, &state->stepkfi, &state->stepkj, _state); nlcslp_slpsendx(state, &state->stepkx, _state); state->f = state->stepkfi.ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->xupdated = ae_false; checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, &state->stepkx, n, &state->replcerr, &state->replcidx, _state); unscaleandchecknlcviolation(&state->stepkfi, &state->fscales, nlec, nlic, &state->repnlcerr, &state->repnlcidx, _state); /* * Trace output (if needed) */ if( dotrace ) { ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); ae_trace("// SLP SOLVER STARTED //\n"); ae_trace("////////////////////////////////////////////////////////////////////////////////////////////////////\n"); } /* * Perform outer (NLC) iterations */ nlcslp_initlpsubsolver(state, &state->subsolver, state->hessiantype, _state); lbl_5: if( ae_false ) { goto lbl_6; } /* * Before beginning new outer iteration: * * renormalize target function and/or constraints, if some of them have too large magnitudes * * save initial point for the outer iteration */ for(i=0; i<=nlec+nlic; i++) { /* * Determine (a) multiplicative coefficient applied to function value * and Jacobian row, and (b) new value of the function scale. */ mx = (double)(0); for(j=0; j<=n-1; j++) { mx = ae_maxreal(mx, ae_fabs(state->stepkj.ptr.pp_double[i][j], _state), _state); } multiplyby = 1.0; setscaleto = state->fscales.ptr.p_double[i]; if( ae_fp_greater_eq(mx,nlcslp_slpbigscale) ) { multiplyby = 1/mx; setscaleto = state->fscales.ptr.p_double[i]*mx; } if( ae_fp_less_eq(mx,nlcslp_slpsmallscale)&&ae_fp_greater(state->fscales.ptr.p_double[i],1.0) ) { if( ae_fp_greater(state->fscales.ptr.p_double[i]*mx,(double)(1)) ) { multiplyby = 1/mx; setscaleto = state->fscales.ptr.p_double[i]*mx; } else { multiplyby = state->fscales.ptr.p_double[i]; setscaleto = 1.0; } } if( ae_fp_neq(multiplyby,1.0) ) { /* * Function #I needs renormalization: * * update function vector element and Jacobian matrix row * * update FScales[] array */ state->stepkfi.ptr.p_double[i] = state->stepkfi.ptr.p_double[i]*multiplyby; for(j=0; j<=n-1; j++) { state->stepkj.ptr.pp_double[i][j] = state->stepkj.ptr.pp_double[i][j]*multiplyby; } state->fscales.ptr.p_double[i] = setscaleto; } } /* * Save initial point for the outer iteration */ nlcslp_slpcopystate(state, &state->stepkx, &state->stepkfi, &state->stepkj, &state->step0x, &state->step0fi, &state->step0j, _state); /* * Trace output (if needed) */ if( dotrace ) { ae_trace("\n=== OUTER ITERATION %5d STARTED ==================================================================\n", (int)(state->repouteriterationscount)); if( dodetailedtrace ) { ae_trace("> printing raw data (prior to applying variable and function scales)\n"); ae_trace("X (raw) = "); tracevectorunscaledunshiftedautoprec(&state->step0x, n, &state->s, ae_true, &state->s, ae_false, _state); ae_trace("\n"); ae_trace("> printing scaled data (after applying variable and function scales)\n"); ae_trace("X (scaled) = "); tracevectorautoprec(&state->step0x, 0, n, _state); ae_trace("\n"); ae_trace("FScales = "); tracevectorautoprec(&state->fscales, 0, 1+nlec+nlic, _state); ae_trace("\n"); ae_trace("Fi 
(scaled) = "); tracevectorautoprec(&state->stepkfi, 0, 1+nlec+nlic, _state); ae_trace("\n"); ae_trace("|Ji| (scaled) = "); tracerownrm1autoprec(&state->stepkj, 0, 1+nlec+nlic, 0, n, _state); ae_trace("\n"); } mx = (double)(0); for(i=1; i<=nlec; i++) { mx = ae_maxreal(mx, ae_fabs(state->stepkfi.ptr.p_double[i], _state), _state); } for(i=nlec+1; i<=nlec+nlic; i++) { mx = ae_maxreal(mx, state->stepkfi.ptr.p_double[i], _state); } ae_trace("trustRad = %0.3e\n", (double)(state->trustrad)); ae_trace("lin.violation = %0.3e (scaled violation of linear constraints)\n", (double)(state->replcerr)); ae_trace("nlc.violation = %0.3e (scaled violation of nonlinear constraints)\n", (double)(mx)); ae_trace("gammaMax = %0.3e\n", (double)(gammamax)); } /* * PHASE 1: * * * perform step using linear model with second order correction * * compute "reference" Lagrange multipliers * * compute merit function at the end of the phase 1 and push it to the history queue * * NOTE: a second order correction helps to overcome Maratos effect - a tendency * of L1 penalized merit function to reject nonzero steps along steepest * descent direction. * * The idea (explained in more details in the Phase13Iteration() body) * is to perform one look-ahead step and use updated constraint values * back at the initial point. */ nlcslp_phase13init(&state->state13, n, nec, nic, nlec, nlic, ae_false, _state); lbl_7: if( !nlcslp_phase13iteration(state, &state->state13, smonitor, userterminationneeded, &state->stepkx, &state->stepkfi, &state->stepkj, &state->meritlagmult, &status, &stp, _state) ) { goto lbl_8; } state->rstate.stage = 2; goto lbl_rcomm; lbl_2: goto lbl_7; lbl_8: if( status<0 ) { goto lbl_5; } if( status==0 ) { goto lbl_6; } for(i=state->historylen; i>=1; i--) { state->meritfunctionhistory.ptr.p_double[i] = state->meritfunctionhistory.ptr.p_double[i-1]; } state->meritfunctionhistory.ptr.p_double[0] = nlcslp_meritfunction(state, &state->stepkx, &state->stepkfi, &state->meritlagmult, &state->tmpmerit, _state); state->historylen = ae_minint(state->historylen+1, nlcslp_nonmonotonicphase2limit, _state); /* * PHASE 2: conjugate subiterations * * If step with second order correction is shorter than 1.0, it means * that target is sufficiently nonlinear to use advanced iterations. * * perform inner LP subiterations with additional conjugacy constraints * * check changes in merit function, discard iteration results if merit function increased */ if( ae_fp_greater_eq(stp,nlcslp_slpstpclosetoone) ) { goto lbl_9; } if( dotrace ) { ae_trace("> linear model produced short step, starting conjugate-gradient-like phase\n"); } nlcslp_slpcopystate(state, &state->stepkx, &state->stepkfi, &state->stepkj, &state->backupx, &state->backupfi, &state->backupj, _state); /* * LP subiterations */ nlcslp_phase2init(&state->state2, n, nec, nic, nlec, nlic, &state->meritlagmult, _state); lbl_11: if( !nlcslp_phase2iteration(state, &state->state2, smonitor, userterminationneeded, &state->stepkx, &state->stepkfi, &state->stepkj, &state->dummylagmult, &gammamax, &status, _state) ) { goto lbl_12; } state->rstate.stage = 3; goto lbl_rcomm; lbl_3: goto lbl_11; lbl_12: if( status==0 ) { /* * Save progress so far and stop */ goto lbl_6; } /* * Evaluating step * * This step is essential because previous step (which minimizes Lagrangian) may fail * to produce descent direction for L1-penalized merit function and will increase it * instead of decreasing. 
* * During evaluation we compare merit function at new location with maximum computed * over last NonmonotonicPhase2Limit+1 previous ones (as suggested in 'A Sequential * Quadratic Programming Algorithm with Non-Monotone Line Search' by Yu-Hong Dai). * * Settings NonmonotonicPhase2Limit to 0 will result in strictly monotonic line search, * whilst having nonzero limits means that we perform more robust nonmonotonic search. */ f1 = state->meritfunctionhistory.ptr.p_double[0]; for(i=1; i<=state->historylen; i++) { f1 = ae_maxreal(f1, state->meritfunctionhistory.ptr.p_double[i], _state); } f2 = nlcslp_meritfunction(state, &state->stepkx, &state->stepkfi, &state->meritlagmult, &state->tmpmerit, _state); if( dotrace ) { ae_trace("> evaluating changes in merit function (max over last %0d values is used for reference):\n", (int)(nlcslp_nonmonotonicphase2limit+1)); ae_trace("meritF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(f1), (double)(f2), (double)(f2-f1)); } if( ae_fp_less(f2,f1) ) { goto lbl_13; } /* * Merit function does not decrease, discard phase results and report is as one * more "fake" inner iteration. * * NOTE: it is important that F2=F1 is considered as "does not decrease" */ if( dotrace ) { ae_trace("> CG-like phase increased merit function, completely discarding phase (happens sometimes, but not too often)\n"); } nlcslp_slpcopystate(state, &state->backupx, &state->backupfi, &state->backupj, &state->stepkx, &state->stepkfi, &state->stepkj, _state); inc(&state->repinneriterationscount, _state); nlcslp_slpsendx(state, &state->stepkx, _state); state->f = state->stepkfi.ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->xupdated = ae_false; checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, &state->stepkx, n, &state->replcerr, &state->replcidx, _state); unscaleandchecknlcviolation(&state->stepkfi, &state->fscales, nlec, nlic, &state->repnlcerr, &state->repnlcidx, _state); goto lbl_14; lbl_13: /* * Merit function decreased, accept phase */ state->meritfunctionhistory.ptr.p_double[0] = f2; if( dotrace ) { ae_trace("> CG-like phase decreased merit function, CG-like step accepted\n"); } lbl_14: goto lbl_10; lbl_9: /* * No phase #2 */ if( dotrace ) { if( ae_fp_greater(stp,(double)(0)) ) { ae_trace("> linear model produced long step, no need to start CG-like iterations\n"); } else { ae_trace("> linear model produced zero step, maybe trust radius is too large\n"); } } lbl_10: /* * Update trust region */ prevtrustrad = state->trustrad; deltamax = (double)(0); for(i=0; i<=n-1; i++) { deltamax = ae_maxreal(deltamax, ae_fabs(state->step0x.ptr.p_double[i]-state->stepkx.ptr.p_double[i], _state)/state->trustrad, _state); } if( ae_fp_less_eq(deltamax,nlcslp_slpdeltadecrease) ) { state->trustrad = state->trustrad*ae_maxreal(deltamax/nlcslp_slpdeltadecrease, nlcslp_maxtrustraddecay, _state); } if( ae_fp_greater_eq(deltamax,nlcslp_slpdeltaincrease) ) { state->trustrad = state->trustrad*ae_minreal(deltamax/nlcslp_slpdeltaincrease, nlcslp_maxtrustradgrowth, _state); } /* * Trace */ if( dotrace ) { ae_trace("\n--- outer iteration ends ---------------------------------------------------------------------------\n"); ae_trace("deltaMax = %0.3f (ratio of step length to trust radius)\n", (double)(deltamax)); ae_trace("newTrustRad = %0.3e", (double)(state->trustrad)); if( ae_fp_greater(state->trustrad,prevtrustrad) ) { ae_trace(", trust radius increased"); } if( ae_fp_less(state->trustrad,prevtrustrad) ) { ae_trace(", 
trust radius decreased"); } ae_trace("\n"); } /* * Advance outer iteration counter, test stopping criteria */ inc(&state->repouteriterationscount, _state); if( ae_fp_less_eq(ae_fabs(state->stepkfi.ptr.p_double[0]-state->step0fi.ptr.p_double[0], _state),nlcslp_stagnationepsf*ae_fabs(state->step0fi.ptr.p_double[0], _state)) ) { inc(&state->fstagnationcnt, _state); } else { state->fstagnationcnt = 0; } if( ae_fp_less_eq(state->trustrad,state->epsx) ) { state->repterminationtype = 2; if( dotrace ) { ae_trace("> stopping condition met: trust radius is smaller than %0.3e\n", (double)(state->epsx)); } goto lbl_6; } if( state->maxits>0&&state->repinneriterationscount>=state->maxits ) { state->repterminationtype = 5; if( dotrace ) { ae_trace("> stopping condition met: %0d iterations performed\n", (int)(state->repinneriterationscount)); } goto lbl_6; } if( state->fstagnationcnt>=nlcslp_fstagnationlimit ) { state->repterminationtype = 7; if( dotrace ) { ae_trace("> stopping criteria are too stringent: F stagnated for %0d its, stopping\n", (int)(state->fstagnationcnt)); } goto lbl_6; } goto lbl_5; lbl_6: smoothnessmonitortracestatus(smonitor, dotrace, _state); result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = nslack; state->rstate.ia.ptr.p_int[2] = nec; state->rstate.ia.ptr.p_int[3] = nic; state->rstate.ia.ptr.p_int[4] = nlec; state->rstate.ia.ptr.p_int[5] = nlic; state->rstate.ia.ptr.p_int[6] = i; state->rstate.ia.ptr.p_int[7] = j; state->rstate.ia.ptr.p_int[8] = innerk; state->rstate.ia.ptr.p_int[9] = status; state->rstate.ba.ptr.p_bool[0] = lpstagesuccess; state->rstate.ba.ptr.p_bool[1] = dotrace; state->rstate.ba.ptr.p_bool[2] = dodetailedtrace; state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = vv; state->rstate.ra.ptr.p_double[2] = mx; state->rstate.ra.ptr.p_double[3] = gammamax; state->rstate.ra.ptr.p_double[4] = f1; state->rstate.ra.ptr.p_double[5] = f2; state->rstate.ra.ptr.p_double[6] = stp; state->rstate.ra.ptr.p_double[7] = deltamax; state->rstate.ra.ptr.p_double[8] = multiplyby; state->rstate.ra.ptr.p_double[9] = setscaleto; state->rstate.ra.ptr.p_double[10] = prevtrustrad; return result; } /************************************************************************* This function initializes SLP subproblem. Should be called once in the beginning of the optimization. INPUT PARAMETERS: SState - solver state Subsolver - SLP subproblem to initialize HessianType - 0 for identity Hessian, 1 for BFGS update RETURN VALUE: True on success False on failure of the LP solver (unexpected... but possible due to numerical errors) -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcslp_initlpsubsolver(minslpstate* sstate, minslpsubsolver* subsolver, ae_int_t hessiantype, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t lccnt; ae_int_t nnz; ae_int_t offs; ae_int_t i; ae_int_t j; n = sstate->n; nec = sstate->nec; nic = sstate->nic; nlec = sstate->nlec; nlic = sstate->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); lccnt = nec+nic+nlec+nlic; /* * Create simplex solver. * * NOTE: we disable DSE pricing because it interferes with our * warm-start strategy. 
*/ dsssettingsinit(&subsolver->dsssettings, _state); subsolver->dsssettings.pricing = 0; dssinit(nslack, &subsolver->dss, _state); /* * Allocate temporaries */ rvectorsetlengthatleast(&subsolver->cural, lccnt+n, _state); rvectorsetlengthatleast(&subsolver->curau, lccnt+n, _state); rmatrixsetlengthatleast(&subsolver->curd, n, n, _state); rmatrixsetlengthatleast(&subsolver->curhd, n, n, _state); rvectorsetlengthatleast(&subsolver->curbndl, nslack, _state); rvectorsetlengthatleast(&subsolver->curbndu, nslack, _state); rvectorsetlengthatleast(&subsolver->curb, nslack, _state); rvectorsetlengthatleast(&subsolver->sk, n, _state); rvectorsetlengthatleast(&subsolver->yk, n, _state); /* * Initial state */ subsolver->basispresent = ae_false; subsolver->curdcnt = 0; subsolver->hessiantype = hessiantype; if( hessiantype==1||hessiantype==2 ) { /* * Prepare Hessian matrix */ rmatrixsetlengthatleast(&subsolver->h, n, n, _state); for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { subsolver->h.ptr.pp_double[i][j] = (double)(0); } subsolver->h.ptr.pp_double[i][i] = (double)(1); } } /* * Linear constraints do not change across subiterations, that's * why we allocate storage for them at the start of the program. * * A full set of "raw" constraints is stored; later we will filter * out inequality ones which are inactive anywhere in the current * trust region. * * NOTE: because sparserawlc object stores only linear constraint * (linearizations of nonlinear ones are not stored) we * allocate only minimum necessary space. */ nnz = 0; for(i=0; i<=nec+nic-1; i++) { for(j=0; j<=n-1; j++) { if( sstate->scaledcleic.ptr.pp_double[i][j]!=0.0 ) { nnz = nnz+1; } } } ivectorsetlengthatleast(&subsolver->sparserawlc.ridx, nec+nic+1, _state); rvectorsetlengthatleast(&subsolver->sparserawlc.vals, nnz, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.idx, nnz, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.didx, nec+nic, _state); ivectorsetlengthatleast(&subsolver->sparserawlc.uidx, nec+nic, _state); offs = 0; subsolver->sparserawlc.ridx.ptr.p_int[0] = 0; for(i=0; i<=nec+nic-1; i++) { for(j=0; j<=n-1; j++) { if( sstate->scaledcleic.ptr.pp_double[i][j]!=0.0 ) { /* * Primary part of the matrix */ subsolver->sparserawlc.vals.ptr.p_double[offs] = sstate->scaledcleic.ptr.pp_double[i][j]; subsolver->sparserawlc.idx.ptr.p_int[offs] = j; offs = offs+1; } } subsolver->sparserawlc.ridx.ptr.p_int[i+1] = offs; } subsolver->sparserawlc.matrixtype = 1; subsolver->sparserawlc.ninitialized = subsolver->sparserawlc.ridx.ptr.p_int[nec+nic]; subsolver->sparserawlc.m = nec+nic; subsolver->sparserawlc.n = n; sparseinitduidx(&subsolver->sparserawlc, _state); } /************************************************************************* Restarts LP subproblem (cleans the matrix of internally stored directions) INPUT PARAMETERS: SState - solver state Subsolver - SLP subproblem to initialize -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcslp_lpsubproblemrestart(minslpstate* sstate, minslpsubsolver* subsolver, ae_state *_state) { subsolver->curdcnt = 0; } /************************************************************************* Updates Hessian estimate INPUT PARAMETERS: SState - solver state Subsolver - SLP subproblem to initialize -- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcslp_lpsubproblemupdatehessian(minslpstate* sstate, minslpsubsolver* 
subsolver, /* Real */ ae_vector* x0, /* Real */ ae_vector* g0, /* Real */ ae_vector* x1, /* Real */ ae_vector* g1, ae_state *_state) { ae_int_t i; ae_int_t n; double vv; double v; double v0; double v1; double v2; double gk; double sk; double yk; n = sstate->n; if( subsolver->hessiantype==1||subsolver->hessiantype==2 ) { rvectorsetlengthatleast(&subsolver->tmp0, n, _state); v = (double)(0); v0 = (double)(0); v1 = (double)(0); v2 = (double)(0); for(i=0; i<=n-1; i++) { sk = x1->ptr.p_double[i]-x0->ptr.p_double[i]; yk = g1->ptr.p_double[i]-g0->ptr.p_double[i]; gk = g0->ptr.p_double[i]; v = v+sk*yk; v0 = v0+sk*sk; v1 = v1+yk*yk; v2 = v2+gk*gk; subsolver->sk.ptr.p_double[i] = sk; subsolver->yk.ptr.p_double[i] = yk; } if( (ae_fp_greater(ae_sqrt(v0, _state),ae_maxreal(sstate->epsx, nlcslp_bfgstol, _state))&&ae_fp_greater(ae_sqrt(v1, _state),nlcslp_bfgstol*ae_sqrt(v2, _state)))&&ae_fp_greater(v,nlcslp_bfgstol*ae_sqrt(v0, _state)*ae_sqrt(v1, _state)) ) { /* * Update Hessian if following criteria hold: * * MCINFO=1 (good step) * * step length is large enough * * |Yk| is large enough when compared with |G| * * (Sk,Yk) is large enough when compared with |S| and |G| */ vv = rmatrixsyvmv(n, &subsolver->h, 0, 0, ae_true, &subsolver->sk, 0, &subsolver->tmp0, _state); rmatrixgemv(n, n, 1.0, &subsolver->h, 0, 0, 0, &subsolver->sk, 0, 0.0, &subsolver->tmp0, 0, _state); rmatrixger(n, n, &subsolver->h, 0, 0, 1/v, &subsolver->yk, 0, &subsolver->yk, 0, _state); rmatrixger(n, n, &subsolver->h, 0, 0, -1/vv, &subsolver->tmp0, 0, &subsolver->tmp0, 0, _state); } } } /************************************************************************* This function solves LP subproblem given by initial point X, function vector Fi and Jacobian Jac, and returns estimates of Lagrangian multipliers and search direction D[]. This function does NOT append search direction D to conjugacy constraints, you have to use LPSubproblemAppendConjugacyConstraint(). 
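NOTE: an informal reading of the subproblem assembled below (a sketch of
      its structure, not a formal statement): the LP works with
      NSlack = N+2*(NEC+NLEC)+NIC+NLIC variables - the N components of the
      step D plus nonnegative slack variables which relax each linearized
      constraint. Slacks enter the objective with a large penalty
      coefficient (BigC), so the subproblem is roughly

          minimize    Grad'*D + BigC*Sum(slacks)
          subject to  |D[i]| <= TrustRad, intersected with box constraints
                      linearized equality/inequality constraints, relaxed
                      by their slack variables
                      conjugacy constraints (H*Dprev)'*D = 0 for previously
                      generated directions Dprev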
-- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcslp_lpsubproblemsolve(minslpstate* state, minslpsubsolver* subsolver, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, ae_int_t innerk, /* Real */ ae_vector* d, /* Real */ ae_vector* lagmult, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t i; ae_int_t j; ae_int_t k; double v; double vv; double vright; double vmax; ae_int_t basisinittype; ae_int_t lccnt; ae_int_t offsslackec; ae_int_t offsslacknlec; ae_int_t offsslackic; ae_int_t offsslacknlic; ae_int_t offs; ae_int_t nnz; ae_int_t j0; ae_int_t j1; ae_bool result; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); lccnt = nec+nic+nlec+nlic; /* * Locations of slack variables */ offsslackec = n; offsslacknlec = n+2*nec; offsslackic = n+2*nec+2*nlec; offsslacknlic = n+2*(nec+nlec)+nic; /* * Prepare temporary structures */ rvectorgrowto(&subsolver->cural, lccnt+subsolver->curdcnt, _state); rvectorgrowto(&subsolver->curau, lccnt+subsolver->curdcnt, _state); /* * Prepare default solution: all zeros */ result = ae_true; for(i=0; i<=nslack-1; i++) { d->ptr.p_double[i] = 0.0; } for(i=0; i<=lccnt-1; i++) { lagmult->ptr.p_double[i] = (double)(0); } /* * Linear term B * * NOTE: elements [N,NSlack) are equal to bigC + perturbation to improve numeric properties of LP problem */ for(i=0; i<=n-1; i++) { subsolver->curb.ptr.p_double[i] = jac->ptr.pp_double[0][i]; } v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(jac->ptr.pp_double[0][i], _state); } v = coalesce(ae_sqrt(v, _state), 1.0, _state); for(i=n; i<=nslack-1; i++) { subsolver->curb.ptr.p_double[i] = (nlcslp_bigc+1.0/(1+i))*v; } /* * Trust radius constraints for primary variables */ for(i=0; i<=n-1; i++) { subsolver->curbndl.ptr.p_double[i] = -state->trustrad; subsolver->curbndu.ptr.p_double[i] = state->trustrad; if( state->hasbndl.ptr.p_bool[i] ) { subsolver->curbndl.ptr.p_double[i] = ae_maxreal(subsolver->curbndl.ptr.p_double[i], state->scaledbndl.ptr.p_double[i]-x->ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { subsolver->curbndu.ptr.p_double[i] = ae_minreal(subsolver->curbndu.ptr.p_double[i], state->scaledbndu.ptr.p_double[i]-x->ptr.p_double[i], _state); } } /* * Prepare storage for "effective" constraining matrix */ nnz = subsolver->sparserawlc.ridx.ptr.p_int[nec+nic]; for(i=0; i<=nlec+nlic-1; i++) { for(j=0; j<=n-1; j++) { if( jac->ptr.pp_double[1+i][j]!=0.0 ) { nnz = nnz+1; } } } nnz = nnz+2*nec+nic; nnz = nnz+2*nlec+nlic; nnz = nnz+subsolver->curdcnt*n; ivectorgrowto(&subsolver->sparseefflc.ridx, lccnt+n+1, _state); rvectorgrowto(&subsolver->sparseefflc.vals, nnz, _state); ivectorgrowto(&subsolver->sparseefflc.idx, nnz, _state); ivectorsetlengthatleast(&subsolver->sparseefflc.didx, lccnt+n, _state); ivectorsetlengthatleast(&subsolver->sparseefflc.uidx, lccnt+n, _state); subsolver->sparseefflc.m = 0; subsolver->sparseefflc.n = nslack; subsolver->sparseefflc.matrixtype = 1; /* * Append linear equality/inequality constraints * * Scan sparsified linear constraints stored in sparserawlc[], skip ones * which are inactive anywhere in the trust region. 
*/ rvectorsetlengthatleast(&subsolver->tmp0, nslack, _state); for(i=0; i<=n-1; i++) { subsolver->tmp0.ptr.p_double[i] = x->ptr.p_double[i]; } for(i=n; i<=nslack-1; i++) { subsolver->tmp0.ptr.p_double[i] = (double)(0); } for(i=0; i<=nec+nic-1; i++) { /* * Calculate: * * VRight - product of X[] (extended with zeros up to NSlack elements) * and AR[i] - Ith row of sparserawlc matrix. * * VMax - maximum value of X*ARi computed over trust region */ vright = (double)(0); vmax = (double)(0); j0 = subsolver->sparserawlc.ridx.ptr.p_int[i]; j1 = subsolver->sparserawlc.ridx.ptr.p_int[i+1]-1; for(k=j0; k<=j1; k++) { j = subsolver->sparserawlc.idx.ptr.p_int[k]; v = subsolver->tmp0.ptr.p_double[j]; vv = subsolver->sparserawlc.vals.ptr.p_double[k]; vright = vright+vv*v; if( vv>=0 ) { vmax = vmax+vv*(v+subsolver->curbndu.ptr.p_double[j]); } else { vmax = vmax+vv*(v+subsolver->curbndl.ptr.p_double[j]); } } /* * If constraint is an inequality one and guaranteed to be inactive * within trust region, it is skipped (row itself is retained but * filled by zeros). */ if( i>=nec&&vmax<=state->scaledcleic.ptr.pp_double[i][n] ) { offs = subsolver->sparseefflc.ridx.ptr.p_int[i]; subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1); subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslackic+(i-nec); subsolver->sparseefflc.ridx.ptr.p_int[i+1] = offs+1; subsolver->cural.ptr.p_double[i] = 0.0; subsolver->curau.ptr.p_double[i] = 0.0; subsolver->curbndl.ptr.p_double[offsslackic+(i-nec)] = (double)(0); subsolver->curbndu.ptr.p_double[offsslackic+(i-nec)] = (double)(0); continue; } /* * Start working on row I */ offs = subsolver->sparseefflc.ridx.ptr.p_int[i]; /* * Copy constraint from sparserawlc[] to sparseefflc[] */ j0 = subsolver->sparserawlc.ridx.ptr.p_int[i]; j1 = subsolver->sparserawlc.ridx.ptr.p_int[i+1]-1; for(k=j0; k<=j1; k++) { subsolver->sparseefflc.idx.ptr.p_int[offs] = subsolver->sparserawlc.idx.ptr.p_int[k]; subsolver->sparseefflc.vals.ptr.p_double[offs] = subsolver->sparserawlc.vals.ptr.p_double[k]; offs = offs+1; } /* * Set up slack variables */ if( i<nec ) { subsolver->sparseefflc.vals.ptr.p_double[offs+0] = (double)(-1); subsolver->sparseefflc.vals.ptr.p_double[offs+1] = (double)(1); subsolver->sparseefflc.idx.ptr.p_int[offs+0] = offsslackec+2*i+0; subsolver->sparseefflc.idx.ptr.p_int[offs+1] = offsslackec+2*i+1; offs = offs+2; } else { /* * Slack variables for inequality constraints */ subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1); subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslackic+(i-nec); offs = offs+1; } /* * Finalize row */ subsolver->sparseefflc.ridx.ptr.p_int[i+1] = offs; /* * Set up bounds.
* * NOTE: bounds for equality and inequality constraints are * handled differently */ v = vright-state->scaledcleic.ptr.pp_double[i][n]; if( i<nec ) { subsolver->cural.ptr.p_double[i] = -v; subsolver->curau.ptr.p_double[i] = -v; subsolver->curbndl.ptr.p_double[offsslackec+2*i+0] = (double)(0); subsolver->curbndl.ptr.p_double[offsslackec+2*i+1] = (double)(0); subsolver->curbndu.ptr.p_double[offsslackec+2*i+0] = ae_fabs(v, _state); subsolver->curbndu.ptr.p_double[offsslackec+2*i+1] = ae_fabs(v, _state); } else { subsolver->cural.ptr.p_double[i] = _state->v_neginf; subsolver->curau.ptr.p_double[i] = -v; subsolver->curbndl.ptr.p_double[offsslackic+(i-nec)] = (double)(0); subsolver->curbndu.ptr.p_double[offsslackic+(i-nec)] = ae_maxreal(v, (double)(0), _state); } } subsolver->sparseefflc.m = subsolver->sparseefflc.m+(nec+nic); /* * Append nonlinear equality/inequality constraints */ for(i=0; i<=nlec+nlic-1; i++) { /* * Calculate scale coefficient */ vv = (double)(0); for(j=0; j<=n-1; j++) { v = jac->ptr.pp_double[1+i][j]; vv = vv+v*v; } vv = 1/coalesce(ae_sqrt(vv, _state), (double)(1), _state); /* * Copy scaled row */ offs = subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m+i]; for(j=0; j<=n-1; j++) { if( jac->ptr.pp_double[1+i][j]!=0.0 ) { subsolver->sparseefflc.vals.ptr.p_double[offs] = vv*jac->ptr.pp_double[1+i][j]; subsolver->sparseefflc.idx.ptr.p_int[offs] = j; offs = offs+1; } } if( i<nlec ) { subsolver->sparseefflc.vals.ptr.p_double[offs+0] = (double)(-1); subsolver->sparseefflc.vals.ptr.p_double[offs+1] = (double)(1); subsolver->sparseefflc.idx.ptr.p_int[offs+0] = offsslacknlec+2*i+0; subsolver->sparseefflc.idx.ptr.p_int[offs+1] = offsslacknlec+2*i+1; offs = offs+2; } else { /* * Add slack terms for inequality constraints */ subsolver->sparseefflc.vals.ptr.p_double[offs] = (double)(-1); subsolver->sparseefflc.idx.ptr.p_int[offs] = offsslacknlic+(i-nlec); offs = offs+1; } subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m+i+1] = offs; /* * Set box constraints on slack variables and bounds on linear equality/inequality constraints */ v = vv*fi->ptr.p_double[1+i]; if( i<nlec ) { subsolver->cural.ptr.p_double[subsolver->sparseefflc.m+i] = -v; subsolver->curau.ptr.p_double[subsolver->sparseefflc.m+i] = -v; subsolver->curbndl.ptr.p_double[offsslacknlec+2*i+0] = (double)(0); subsolver->curbndl.ptr.p_double[offsslacknlec+2*i+1] = (double)(0); subsolver->curbndu.ptr.p_double[offsslacknlec+2*i+0] = ae_fabs(v, _state); subsolver->curbndu.ptr.p_double[offsslacknlec+2*i+1] = ae_fabs(v, _state); } else { /* * Inequality constraint */ subsolver->cural.ptr.p_double[subsolver->sparseefflc.m+i] = _state->v_neginf; subsolver->curau.ptr.p_double[subsolver->sparseefflc.m+i] = -v; subsolver->curbndl.ptr.p_double[offsslacknlic+(i-nlec)] = (double)(0); subsolver->curbndu.ptr.p_double[offsslacknlic+(i-nlec)] = ae_maxreal(v, (double)(0), _state); } } subsolver->sparseefflc.m = subsolver->sparseefflc.m+(nlec+nlic); /* * Append conjugacy constraints */ for(i=0; i<=subsolver->curdcnt-1; i++) { /* * Copy N elements of CurHD * * NOTE: we expect product of D and H to be dense, so we copy all N elements */ v = (double)(0); for(j=0; j<=n-1; j++) { vv = subsolver->curhd.ptr.pp_double[i][j]; v = v+vv*vv; } v = 1.0/coalesce(ae_sqrt(v, _state), 1.0, _state); offs = subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]; for(j=0; j<=n-1; j++) { vv = subsolver->curhd.ptr.pp_double[i][j]; subsolver->sparseefflc.vals.ptr.p_double[offs] = v*vv; subsolver->sparseefflc.idx.ptr.p_int[offs] = j; offs = offs+1; }
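/* * The row just written is the normalized I-th row of CurHD (Dprev, or H*Dprev for BFGS-like Hessian models); together with the zero constraint bounds set right below, it enforces the conjugacy condition CurHD[I]'*D=0 on the new direction D. */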
subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m+1] = offs; /* * Set bounds on linear constraints */ subsolver->cural.ptr.p_double[subsolver->sparseefflc.m] = (double)(0); subsolver->curau.ptr.p_double[subsolver->sparseefflc.m] = (double)(0); /* * Increase row count */ subsolver->sparseefflc.m = subsolver->sparseefflc.m+1; } /* * Finalize sparse matrix structure */ ae_assert(subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]<=subsolver->sparseefflc.idx.cnt, "LPSubproblemSolve: critical integrity check failed", _state); ae_assert(subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]<=subsolver->sparseefflc.vals.cnt, "LPSubproblemSolve: critical integrity check failed", _state); subsolver->sparseefflc.ninitialized = subsolver->sparseefflc.ridx.ptr.p_int[subsolver->sparseefflc.m]; sparseinitduidx(&subsolver->sparseefflc, _state); /* * Choose dual simplex method basis initialization type */ if( innerk==1&&subsolver->basispresent ) { basisinittype = 2; } else { basisinittype = 1; } /* * Solve linear program */ rvectorsetlengthatleast(&subsolver->tmp0, nslack, _state); for(i=0; i<=nslack-1; i++) { subsolver->tmp0.ptr.p_double[i] = state->trustrad; } dsssetproblem(&subsolver->dss, &subsolver->curb, &subsolver->curbndl, &subsolver->curbndu, &subsolver->tmp0, &subsolver->densedummy, &subsolver->sparseefflc, 1, &subsolver->cural, &subsolver->curau, subsolver->sparseefflc.m, &subsolver->lastbasis, basisinittype, &subsolver->dsssettings, _state); dssoptimize(&subsolver->dss, &subsolver->dsssettings, _state); state->repsimplexiterations = state->repsimplexiterations+subsolver->dss.repiterationscount; state->repsimplexiterations1 = state->repsimplexiterations1+subsolver->dss.repiterationscount1; state->repsimplexiterations2 = state->repsimplexiterations2+subsolver->dss.repiterationscount2; state->repsimplexiterations3 = state->repsimplexiterations3+subsolver->dss.repiterationscount3; if( subsolver->dss.repterminationtype<=0 ) { /* * LP solver failed due to numerical errors; exit */ result = ae_false; return result; } if( innerk==1 ) { /* * Store basis */ dssexportbasis(&subsolver->dss, &subsolver->lastbasis, _state); subsolver->basispresent = ae_true; } /* * Extract direction D[] and Lagrange multipliers */ for(i=0; i<=nslack-1; i++) { d->ptr.p_double[i] = subsolver->dss.repx.ptr.p_double[i]; } for(i=0; i<=lccnt-1; i++) { lagmult->ptr.p_double[i] = -subsolver->dss.repdx.ptr.p_double[i]; } return result; } /************************************************************************* This function appends last search direction D to conjugacy constraints of the LP subproblem. 
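The appended constraint requires any subsequently generated direction Dnew
to satisfy Dnew'*Dprev=0 (HessianType=0, identity model) or Dnew'*(H*Dprev)=0
(HessianType=1 or 2, BFGS-like model H), which is what makes successive LP
directions produced during phase #2 behave like conjugate gradient ones.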
-- ALGLIB -- Copyright 05.03.2018 by Bochkanov Sergey *************************************************************************/ static void nlcslp_lpsubproblemappendconjugacyconstraint(minslpstate* state, minslpsubsolver* subsolver, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; n = state->n; /* * Update matrix of products H*Dprev */ ae_assert(subsolver->curdcnt<subsolver->curd.rows, "SLP: CurD is too small", _state); for(i=0; i<=n-1; i++) { /* * Store direction and default conjugacy constraint d'*I*Dprev=0 */ subsolver->curd.ptr.pp_double[subsolver->curdcnt][i] = d->ptr.p_double[i]; subsolver->curhd.ptr.pp_double[subsolver->curdcnt][i] = d->ptr.p_double[i]; } inc(&subsolver->curdcnt, _state); if( state->hessiantype==1 ) { /* * Conjugacy constraint d*H*Dprev=0, full recomputation of (H*Dprev) */ rmatrixgemm(subsolver->curdcnt, n, n, 1.0, &subsolver->curd, 0, 0, 0, &subsolver->h, 0, 0, 0, 0.0, &subsolver->curhd, 0, 0, _state); } if( state->hessiantype==2 ) { /* * Conjugacy constraint d*H*Dprev=0, only last row of (H*Dprev) is recomputed */ rvectorsetlengthatleast(&subsolver->tmp0, n, _state); rmatrixgemv(n, n, 1.0, &subsolver->h, 0, 0, 0, d, 0, 0.0, &subsolver->tmp0, 0, _state); for(j=0; j<=n-1; j++) { subsolver->curhd.ptr.pp_double[subsolver->curdcnt-1][j] = subsolver->tmp0.ptr.p_double[j]; } } } /************************************************************************* This function initializes Phase13 temporaries. It should be called before beginning of each new iteration. You may call it multiple times for the same instance of Phase13 temporaries. INPUT PARAMETERS: State13 - instance to be initialized. N - problem dimensionality NEC, NIC - linear equality/inequality constraint count NLEC, NLIC - nonlinear equality/inequality constraint count UseCorrection - True if we want to perform second order correction OUTPUT PARAMETERS: State13 - instance being initialized -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static void nlcslp_phase13init(minslpphase13state* state13, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, ae_bool usecorrection, ae_state *_state) { ae_int_t nslack; nslack = n+2*(nec+nlec)+(nic+nlic); state13->usecorrection = usecorrection; rvectorsetlengthatleast(&state13->d, nslack, _state); rvectorsetlengthatleast(&state13->dx, nslack, _state); rvectorsetlengthatleast(&state13->stepkxc, n, _state); rvectorsetlengthatleast(&state13->stepkxn, n, _state); rvectorsetlengthatleast(&state13->stepkfic, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state13->stepkfin, 1+nlec+nlic, _state); rmatrixsetlengthatleast(&state13->stepkjc, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&state13->stepkjn, 1+nlec+nlic, n, _state); rvectorsetlengthatleast(&state13->dummylagmult, nec+nic+nlec+nlic, _state); ae_vector_set_length(&state13->rphase13state.ia, 8+1, _state); ae_vector_set_length(&state13->rphase13state.ba, 2+1, _state); ae_vector_set_length(&state13->rphase13state.ra, 5+1, _state); state13->rphase13state.stage = -1; } /************************************************************************* This function tries to perform either phase #1 or phase #3 step. Former corresponds to linear model step (without conjugacy constraints) with correction for nonlinearity ("second order correction"). Such correction helps to overcome Maratos effect (a tendency of L1 penalized merit functions to reject nonzero steps).
Latter is a step using linear model with no second order correction. INPUT PARAMETERS: State - SLP solver state SMonitor - smoothness monitor UserTerminationNeeded-True if user requested termination CurX - current point, array[N] CurFi - function vector at CurX, array[1+NLEC+NLIC] CurJ - Jacobian at CurX, array[1+NLEC+NLIC,N] LagMult - array[NEC+NIC+NLEC+NLIC], contents ignored on input. OUTPUT PARAMETERS: State - RepTerminationType is set to current termination code (if Status=0). CurX - advanced to new point CurFi - updated with function vector at CurX[] CurJ - updated with Jacobian at CurX[] LagMult - filled with current Lagrange multipliers Status - when reverse communication is done, Status is set to: * negative value, if we have to restart outer iteration * positive value, if we can proceed to the next stage of the outer iteration * zero, if algorithm is terminated (RepTerminationType is set to appropriate value) Stp - step length, in [0,1] -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcslp_phase13iteration(minslpstate* state, minslpphase13state* state13, smoothnessmonitor* smonitor, ae_bool userterminationneeded, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, ae_int_t* status, double* stp, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; ae_int_t innerk; ae_int_t i; ae_int_t j; double v; double mx; double f0; double f1; double nu; double localstp; ae_bool dotrace; ae_bool doprobing; ae_bool dotracexd; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state13->rphase13state.stage>=0 ) { n = state13->rphase13state.ia.ptr.p_int[0]; nslack = state13->rphase13state.ia.ptr.p_int[1]; nec = state13->rphase13state.ia.ptr.p_int[2]; nic = state13->rphase13state.ia.ptr.p_int[3]; nlec = state13->rphase13state.ia.ptr.p_int[4]; nlic = state13->rphase13state.ia.ptr.p_int[5]; innerk = state13->rphase13state.ia.ptr.p_int[6]; i = state13->rphase13state.ia.ptr.p_int[7]; j = state13->rphase13state.ia.ptr.p_int[8]; dotrace = state13->rphase13state.ba.ptr.p_bool[0]; doprobing = state13->rphase13state.ba.ptr.p_bool[1]; dotracexd = state13->rphase13state.ba.ptr.p_bool[2]; v = state13->rphase13state.ra.ptr.p_double[0]; mx = state13->rphase13state.ra.ptr.p_double[1]; f0 = state13->rphase13state.ra.ptr.p_double[2]; f1 = state13->rphase13state.ra.ptr.p_double[3]; nu = state13->rphase13state.ra.ptr.p_double[4]; localstp = state13->rphase13state.ra.ptr.p_double[5]; } else { n = 886; nslack = 346; nec = -722; nic = -413; nlec = -461; nlic = 927; innerk = 201; i = 922; j = -154; dotrace = ae_false; doprobing = ae_true; dotracexd = ae_true; v = -463; mx = 88; f0 = -861; f1 = -678; nu = -731; localstp = -675; } if( state13->rphase13state.stage==0 ) { goto lbl_0; } if( state13->rphase13state.stage==1 ) { goto lbl_1; } if( state13->rphase13state.stage==2 ) { goto lbl_2; } if( state13->rphase13state.stage==3 ) { goto lbl_3; } /* * Routine body */ n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); innerk = 1; dotrace = ae_is_trace_enabled("SLP"); dotracexd = dotrace&&ae_is_trace_enabled("SLP.DETAILED"); doprobing = ae_is_trace_enabled("SLP.PROBING"); ae_assert(lagmult->cnt>=nec+nic+nlec+nlic, "Phase13Iteration: integrity check failed", _state); /* * Report iteration beginning */ if( dotrace ) { if( state13->usecorrection ) { ae_trace("\n--- linear step with second-order correction -------------------------------------------------------\n"); } else { ae_trace("\n--- linear step without second-order correction ----------------------------------------------------\n"); } } /* * Default decision is to continue algorithm */ *status = 1; *stp = (double)(0); /* * Determine step direction using linearized model with no conjugacy terms */ nlcslp_lpsubproblemrestart(state, &state->subsolver, _state); if( !nlcslp_lpsubproblemsolve(state, &state->subsolver, curx, curfi, curj, innerk, &state13->d, lagmult, _state) ) { if( dotrace ) { ae_trace("> [WARNING] initial phase #1 LP subproblem failed\n"); } /* * Increase failures counter. 
* Stop after too many subsequent failures */ inc(&state->lpfailurecnt, _state); if( state->lpfailurecnt>=nlcslp_lpfailureslimit ) { state->repterminationtype = 7; *status = 0; if( dotrace ) { ae_trace("> stopping condition met: too many phase #1 LP failures\n"); } result = ae_false; return result; } /* * Can not solve LP subproblem, decrease trust radius */ state->trustrad = 0.5*state->trustrad; if( dotrace ) { ae_trace("> trust radius was decreased to %0.4e\n", (double)(state->trustrad)); } if( ae_fp_less(state->trustrad,state->epsx) ) { state->repterminationtype = 2; *status = 0; if( dotrace ) { ae_trace("> stopping condition met: trust radius is smaller than %0.3e\n", (double)(state->epsx)); } } else { *status = -1; } result = ae_false; return result; } nlcslp_lpsubproblemappendconjugacyconstraint(state, &state->subsolver, &state13->d, _state); /* * Compute second order correction if required. The issue we address here * is a tendency of L1 penalized function to reject steps built using simple * linearized model when nonlinear constraints change faster than the target. * * The idea is that we perform trial step (stp=1) using simple linearized model, * compute constraint vector at the new trial point - and use these updated * constraint linearizations back at the initial point. */ if( !state13->usecorrection ) { goto lbl_4; } /* * Perform trial step using vector D to StepKXC */ for(i=0; i<=n-1; i++) { state13->stepkxc.ptr.p_double[i] = curx->ptr.p_double[i]+state13->d.ptr.p_double[i]; } nlcslp_slpsendx(state, &state13->stepkxc, _state); state->needfij = ae_true; state13->rphase13state.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state13->stepkfic, &state13->stepkjc, _state) ) { /* * Failed to retrieve func/Jac, infinities detected */ state->repterminationtype = -8; *status = 0; if( dotrace ) { ae_trace("[ERROR] infinities in target/constraints are detected\n"); } result = ae_false; return result; } /* * Move back to point CurX[], restore original linearization of the target */ state13->stepkfic.ptr.p_double[0] = curfi->ptr.p_double[0]; for(j=0; j<=n-1; j++) { state13->stepkxc.ptr.p_double[j] = curx->ptr.p_double[j]; state13->stepkjc.ptr.pp_double[0][j] = curj->ptr.pp_double[0][j]; } /* * Extrapolate linearization of nonlinear constraints back to origin */ for(i=1; i<=nlec+nlic; i++) { v = (double)(0); for(j=0; j<=n-1; j++) { v = v+state13->d.ptr.p_double[j]*state13->stepkjc.ptr.pp_double[i][j]; } state13->stepkfic.ptr.p_double[i] = state13->stepkfic.ptr.p_double[i]-v; } /* * Solve linearized problem one more time, now with new linearization of constraints * (but still old linearization of the target), obtain DX * * NOTE: because lpsubproblemrestart() call resets set of conjugate constraints, we * have to re-add it after solve. */ nlcslp_lpsubproblemrestart(state, &state->subsolver, _state); if( !nlcslp_lpsubproblemsolve(state, &state->subsolver, &state13->stepkxc, &state13->stepkfic, &state13->stepkjc, innerk, &state13->dx, &state13->dummylagmult, _state) ) { /* * Second LP subproblem failed. 
* Noncritical failure, can be ignored, */ if( dotrace ) { ae_trace("> [WARNING] second phase #1 LP subproblem failed\n"); } if( dotrace ) { ae_trace("> using step without second order correction\n"); } } else { /* * Set D to new direction */ for(i=0; i<=n-1; i++) { state13->d.ptr.p_double[i] = state13->dx.ptr.p_double[i]; } } nlcslp_lpsubproblemappendconjugacyconstraint(state, &state->subsolver, &state13->d, _state); lbl_4: /* * Perform merit function backtracking line search, with trial point being * computed as XN = XK + Stp*D, with Stp in [0,1] * * NOTE: we use MeritLagMult - Lagrange multipliers computed for initial, * uncorrected task - for the merit function model. * Using DummyLagMult can destabilize algorithm. */ localstp = 1.0; nu = 0.5; f0 = nlcslp_meritfunction(state, curx, curfi, lagmult, &state13->tmpmerit, _state); f1 = f0; smoothnessmonitorstartlinesearch(smonitor, curx, curfi, curj, _state); lbl_6: if( ae_false ) { goto lbl_7; } for(i=0; i<=n-1; i++) { state13->stepkxn.ptr.p_double[i] = curx->ptr.p_double[i]+localstp*state13->d.ptr.p_double[i]; } nlcslp_slpsendx(state, &state13->stepkxn, _state); state->needfij = ae_true; state13->rphase13state.stage = 1; goto lbl_rcomm; lbl_1: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state13->stepkfin, &state13->stepkjn, _state) ) { /* * Failed to retrieve func/Jac, infinities detected */ state->repterminationtype = -8; *status = 0; if( dotrace ) { ae_trace("[ERROR] infinities in target/constraints are detected\n"); } result = ae_false; return result; } smoothnessmonitorenqueuepoint(smonitor, &state13->d, localstp, &state13->stepkxn, &state13->stepkfin, &state13->stepkjn, _state); f1 = nlcslp_meritfunction(state, &state13->stepkxn, &state13->stepkfin, lagmult, &state13->tmpmerit, _state); if( ae_fp_less(f1,f0) ) { /* * Step is found! */ goto lbl_7; } if( ae_fp_less(localstp,0.001) ) { /* * Step is shorter than 0.001 times current search direction, * it means that no good step can be found. 
*/ localstp = (double)(0); nlcslp_slpcopystate(state, curx, curfi, curj, &state13->stepkxn, &state13->stepkfin, &state13->stepkjn, _state); goto lbl_7; } localstp = nu*localstp; nu = ae_maxreal(0.1, 0.5*nu, _state); goto lbl_6; lbl_7: smoothnessmonitorfinalizelinesearch(smonitor, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state13->stepkxn.ptr.p_double[i] = ae_maxreal(state13->stepkxn.ptr.p_double[i], state->scaledbndl.ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { state13->stepkxn.ptr.p_double[i] = ae_minreal(state13->stepkxn.ptr.p_double[i], state->scaledbndu.ptr.p_double[i], _state); } } if( userterminationneeded ) { /* * User requested termination, break before we move to new point */ state->repterminationtype = 8; *status = 0; if( dotrace ) { ae_trace("> user requested termination\n"); } result = ae_false; return result; } /* * Trace */ if( !dotrace ) { goto lbl_8; } if( !doprobing ) { goto lbl_10; } smoothnessmonitorstartprobing(smonitor, 1.0, 2, state->trustrad, _state); lbl_12: if( !smoothnessmonitorprobe(smonitor, _state) ) { goto lbl_13; } for(j=0; j<=n-1; j++) { state13->stepkxc.ptr.p_double[j] = curx->ptr.p_double[j]+smonitor->probingstp*state13->d.ptr.p_double[j]; if( state->hasbndl.ptr.p_bool[j] ) { state13->stepkxc.ptr.p_double[j] = ae_maxreal(state13->stepkxc.ptr.p_double[j], state->scaledbndl.ptr.p_double[j], _state); } if( state->hasbndu.ptr.p_bool[j] ) { state13->stepkxc.ptr.p_double[j] = ae_minreal(state13->stepkxc.ptr.p_double[j], state->scaledbndu.ptr.p_double[j], _state); } } nlcslp_slpsendx(state, &state13->stepkxc, _state); state->needfij = ae_true; state13->rphase13state.stage = 2; goto lbl_rcomm; lbl_2: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state13->stepkfic, &state13->stepkjc, _state) ) { goto lbl_13; } smonitor->probingf.ptr.p_double[0] = nlcslp_rawlagrangian(state, &state13->stepkxc, &state13->stepkfic, lagmult, &state13->tmpmerit, _state); smonitor->probingf.ptr.p_double[1] = state13->stepkfic.ptr.p_double[0]; goto lbl_12; lbl_13: ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | probing search direction suggested by LP subproblem |\n"); ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | Step | Lagrangian (unaugmentd)| Target function |\n"); ae_trace("*** |along D| must be smooth | must be smooth |\n"); ae_trace("*** | | function | slope | function | slope |\n"); smoothnessmonitortraceprobingresults(smonitor, _state); lbl_10: mx = (double)(0); for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(state13->d.ptr.p_double[i], _state)/state->trustrad, _state); } if( ae_fp_greater(localstp,(double)(0)) ) { ae_trace("> nonzero linear step was performed\n"); } else { ae_trace("> zero linear step was performed\n"); } ae_trace("max(|Di|)/TrustRad = %0.6f\n", (double)(mx)); ae_trace("stp = %0.6f\n", (double)(localstp)); if( dotracexd ) { ae_trace("X0 (scaled) = "); tracevectorautoprec(curx, 0, n, _state); ae_trace("\n"); ae_trace("D (scaled) = "); tracevectorautoprec(&state13->d, 0, n, _state); ae_trace("\n"); ae_trace("X1 (scaled) = "); tracevectorautoprec(&state13->stepkxn, 0, n, _state); ae_trace("\n"); } ae_trace("meritF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(f0), (double)(f1), (double)(f1-f0)); ae_trace("scaled-targetF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(curfi->ptr.p_double[0]), (double)(state13->stepkfin.ptr.p_double[0]), (double)(state13->stepkfin.ptr.p_double[0]-curfi->ptr.p_double[0])); 
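/* * NOTE: in the trace output above, meritF is the L1-penalized merit function computed by nlcslp_meritfunction(), while scaled-targetF is the raw target Fi[0] in scaled variables; both are reported as "initial -> final (delta)". */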
lbl_8: /* * Move to new point */ *stp = localstp; nlcslp_slpcopystate(state, &state13->stepkxn, &state13->stepkfin, &state13->stepkjn, curx, curfi, curj, _state); if( ae_fp_less_eq(localstp,(double)(0)) ) { goto lbl_14; } /* * Report one more inner iteration */ inc(&state->repinneriterationscount, _state); nlcslp_slpsendx(state, curx, _state); state->f = curfi->ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; state13->rphase13state.stage = 3; goto lbl_rcomm; lbl_3: state->xupdated = ae_false; /* * Update constraint violations */ checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, curx, n, &state->replcerr, &state->replcidx, _state); unscaleandchecknlcviolation(curfi, &state->fscales, nlec, nlic, &state->repnlcerr, &state->repnlcidx, _state); lbl_14: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state13->rphase13state.ia.ptr.p_int[0] = n; state13->rphase13state.ia.ptr.p_int[1] = nslack; state13->rphase13state.ia.ptr.p_int[2] = nec; state13->rphase13state.ia.ptr.p_int[3] = nic; state13->rphase13state.ia.ptr.p_int[4] = nlec; state13->rphase13state.ia.ptr.p_int[5] = nlic; state13->rphase13state.ia.ptr.p_int[6] = innerk; state13->rphase13state.ia.ptr.p_int[7] = i; state13->rphase13state.ia.ptr.p_int[8] = j; state13->rphase13state.ba.ptr.p_bool[0] = dotrace; state13->rphase13state.ba.ptr.p_bool[1] = doprobing; state13->rphase13state.ba.ptr.p_bool[2] = dotracexd; state13->rphase13state.ra.ptr.p_double[0] = v; state13->rphase13state.ra.ptr.p_double[1] = mx; state13->rphase13state.ra.ptr.p_double[2] = f0; state13->rphase13state.ra.ptr.p_double[3] = f1; state13->rphase13state.ra.ptr.p_double[4] = nu; state13->rphase13state.ra.ptr.p_double[5] = localstp; return result; } /************************************************************************* This function initializes Phase2 temporaries. It should be called before beginning of each new iteration. You may call it multiple times for the same instance of Phase2 temporaries. INPUT PARAMETERS: State2 - instance to be initialized. 
N - problem dimensionality NEC, NIC - linear equality/inequality constraint count NLEC, NLIC - nonlinear equality/inequality constraint count MeritLagMult - Lagrange multiplier estimates used by merit function (we could use ones computed during phase #2, but these may differ from ones computed initially at the beginning of the outer iteration, so it may confuse algorithm) OUTPUT PARAMETERS: State2 - instance being initialized -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static void nlcslp_phase2init(minslpphase2state* state2, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t nlec, ae_int_t nlic, /* Real */ ae_vector* meritlagmult, ae_state *_state) { ae_int_t i; ae_int_t nslack; nslack = n+2*(nec+nlec)+(nic+nlic); rvectorsetlengthatleast(&state2->d, nslack, _state); rvectorsetlengthatleast(&state2->tmp0, nslack, _state); rvectorsetlengthatleast(&state2->stepkxn, n, _state); rvectorsetlengthatleast(&state2->stepkxc, n, _state); rvectorsetlengthatleast(&state2->stepkfin, 1+nlec+nlic, _state); rvectorsetlengthatleast(&state2->stepkfic, 1+nlec+nlic, _state); rmatrixsetlengthatleast(&state2->stepkjn, 1+nlec+nlic, n, _state); rmatrixsetlengthatleast(&state2->stepkjc, 1+nlec+nlic, n, _state); rvectorsetlengthatleast(&state2->stepklaggrad, n, _state); rvectorsetlengthatleast(&state2->stepknlaggrad, n, _state); rvectorsetlengthatleast(&state2->stepknlagmult, nec+nic+nlec+nlic, _state); rvectorsetlengthatleast(&state2->meritlagmult, nec+nic+nlec+nlic, _state); for(i=0; i<=nec+nic+nlec+nlic-1; i++) { state2->meritlagmult.ptr.p_double[i] = meritlagmult->ptr.p_double[i]; } ae_vector_set_length(&state2->rphase2state.ia, 12+1, _state); ae_vector_set_length(&state2->rphase2state.ba, 2+1, _state); ae_vector_set_length(&state2->rphase2state.ra, 8+1, _state); state2->rphase2state.stage = -1; } /************************************************************************* This function tries to perform phase #2 iterations. Phase #2 is a sequence of linearized steps minimizing L2-penalized Lagrangian performed with successively increasing set of conjugacy constraints (which make algorithm behavior similar to that of CG). INPUT PARAMETERS: State - SLP solver state SMonitor - smoothness monitor UserTerminationNeeded-True if user requested termination CurX - current point, array[N] CurFi - function vector at CurX, array[1+NLEC+NLIC] CurJ - Jacobian at CurX, array[1+NLEC+NLIC,N] LagMult - array[NEC+NIC+NLEC+NLIC], contents ignored on input. GammaMax - current estimate of the Hessian norm OUTPUT PARAMETERS: State - RepTerminationType is set to current termination code (if Status=0). 
CurX - advanced to new point CurFi - updated with function vector at CurX[] CurJ - updated with Jacobian at CurX[] LagMult - filled with current Lagrange multipliers GammaMax - updated estimate of the Hessian norm Status - when reverse communication is done, Status is set to: * negative value, if we have to restart outer iteration * positive value, if we can proceed to the next stage of the outer iteration * zero, if algorithm is terminated (RepTerminationType is set to appropriate value) -- ALGLIB -- Copyright 05.02.2019 by Bochkanov Sergey *************************************************************************/ static ae_bool nlcslp_phase2iteration(minslpstate* state, minslpphase2state* state2, smoothnessmonitor* smonitor, ae_bool userterminationneeded, /* Real */ ae_vector* curx, /* Real */ ae_vector* curfi, /* Real */ ae_matrix* curj, /* Real */ ae_vector* lagmult, double* gammamax, ae_int_t* status, ae_state *_state) { ae_int_t n; ae_int_t nslack; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; double stp; ae_int_t mcinfo; ae_int_t mcnfev; ae_int_t mcstage; ae_int_t i; ae_int_t j; ae_int_t innerk; double v; double vv; double mx; ae_int_t nondescentcnt; double stepklagval; double stepknlagval; double gammaprev; double f0; double f1; ae_bool dotrace; ae_bool doprobing; ae_bool dotracexd; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state2->rphase2state.stage>=0 ) { n = state2->rphase2state.ia.ptr.p_int[0]; nslack = state2->rphase2state.ia.ptr.p_int[1]; nec = state2->rphase2state.ia.ptr.p_int[2]; nic = state2->rphase2state.ia.ptr.p_int[3]; nlec = state2->rphase2state.ia.ptr.p_int[4]; nlic = state2->rphase2state.ia.ptr.p_int[5]; mcinfo = state2->rphase2state.ia.ptr.p_int[6]; mcnfev = state2->rphase2state.ia.ptr.p_int[7]; mcstage = state2->rphase2state.ia.ptr.p_int[8]; i = state2->rphase2state.ia.ptr.p_int[9]; j = state2->rphase2state.ia.ptr.p_int[10]; innerk = state2->rphase2state.ia.ptr.p_int[11]; nondescentcnt = state2->rphase2state.ia.ptr.p_int[12]; dotrace = state2->rphase2state.ba.ptr.p_bool[0]; doprobing = state2->rphase2state.ba.ptr.p_bool[1]; dotracexd = state2->rphase2state.ba.ptr.p_bool[2]; stp = state2->rphase2state.ra.ptr.p_double[0]; v = state2->rphase2state.ra.ptr.p_double[1]; vv = state2->rphase2state.ra.ptr.p_double[2]; mx = state2->rphase2state.ra.ptr.p_double[3]; stepklagval = state2->rphase2state.ra.ptr.p_double[4]; stepknlagval = state2->rphase2state.ra.ptr.p_double[5]; gammaprev = state2->rphase2state.ra.ptr.p_double[6]; f0 = state2->rphase2state.ra.ptr.p_double[7]; f1 = state2->rphase2state.ra.ptr.p_double[8]; } else { n = -763; nslack = -233; nec = -936; nic = -279; nlec = 94; nlic = -812; mcinfo = 427; mcnfev = 178; mcstage = -819; i = -826; j = 667; innerk = 692; nondescentcnt = 84; dotrace = ae_true; doprobing = ae_false; dotracexd = ae_false; stp = -908; v = 577; vv = 289; mx = 317; stepklagval = 476; stepknlagval = -889; gammaprev = -400; f0 = 489; f1 = -962; } if( state2->rphase2state.stage==0 ) { goto lbl_0; } if( state2->rphase2state.stage==1 ) { goto lbl_1; } if( state2->rphase2state.stage==2 ) { goto lbl_2; } /* * Routine body */ n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; nslack = n+2*(nec+nlec)+(nic+nlic); dotrace = 
ae_is_trace_enabled("SLP"); dotracexd = dotrace&&ae_is_trace_enabled("SLP.DETAILED"); doprobing = ae_is_trace_enabled("SLP.PROBING"); ae_assert(lagmult->cnt>=nec+nic+nlec+nlic, "Phase13Iteration: integrity check failed", _state); /* * Report iteration beginning */ if( dotrace ) { ae_trace("\n--- linear step with conjugate constraints (CG-like convergence) -----------------------------------\n"); } /* * The default decision is to continue iterations */ *status = 1; /* * Perform inner LP subiterations. * * During this process we maintain information about several points: * * point #0, initial one, with "step0" prefix * * point #K, last one of current LP session, with "stepk" prefix * * additionally we have point #KN, current candidate during line search at step K. * * For each point we store: * * location X (scaled coordinates) * * function vector Fi (target function + nonlinear constraints) * * scaled Jacobian J */ nondescentcnt = 0; nlcslp_lpsubproblemrestart(state, &state->subsolver, _state); innerk = 1; lbl_3: if( innerk>n ) { goto lbl_5; } /* * Formulate LP subproblem and solve it */ if( !nlcslp_lpsubproblemsolve(state, &state->subsolver, curx, curfi, curj, innerk, &state2->d, lagmult, _state) ) { /* * LP solver failed due to numerical errors; exit. * It may happen when we solve problem with LOTS of conjugacy constraints. */ if( innerk==1 ) { /* * The very first iteration failed, really strange. */ if( dotrace ) { ae_trace("[WARNING] the very first LP subproblem failed to produce descent direction\n"); } } else { /* * Quite a normal, the problem is overconstrained by conjugacy constraints now */ if( dotrace ) { ae_trace("> LP subproblem is overconstrained (happens after too many iterations), time to stop\n"); } } result = ae_false; return result; } mx = (double)(0); for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(state2->d.ptr.p_double[i], _state)/state->trustrad, _state); } if( ae_fp_eq(mx,(double)(0)) ) { /* * Nearly-zero direction is suggested (maybe we arrived exactly to the solution), stop iterations */ *status = 1; nlcslp_slpcopystate(state, curx, curfi, curj, &state2->stepkxn, &state2->stepkfin, &state2->stepkjn, _state); if( dotrace ) { ae_trace("> LP subproblem suggested nearly zero step\n"); } if( dotrace ) { ae_trace("max(|Di|)/TrustRad = %0.6f\n", (double)(mx)); } if( dotrace ) { ae_trace("> stopping CG-like iterations\n"); } result = ae_false; return result; } nlcslp_lpsubproblemappendconjugacyconstraint(state, &state->subsolver, &state2->d, _state); /* * Perform line search to minimize Lagrangian along D. * Post-normalize StepKXN with respect to box constraints. * * MCSRCH can fail in the following cases: * * rounding errors prevent optimization * * non-descent direction is specified (MCINFO=0 is returned) * In the latter case we proceed to minimization of merit function. 
* * NOTE: constraint violation reports are updated during Lagrangian computation */ state2->lastlcerr = (double)(0); state2->lastlcidx = -1; state2->lastnlcerr = (double)(0); state2->lastnlcidx = -1; rvectorsetlengthatleast(&state2->tmp0, n, _state); nlcslp_lagrangianfg(state, curx, state->trustrad, curfi, curj, lagmult, &state2->tmplagrangianfg, &stepklagval, &state2->stepklaggrad, &state2->lastlcerr, &state2->lastlcidx, &state2->lastnlcerr, &state2->lastnlcidx, _state); nlcslp_slpcopystate(state, curx, curfi, curj, &state2->stepkxn, &state2->stepkfin, &state2->stepkjn, _state); v = (double)(0); for(i=0; i<=n-1; i++) { state2->stepknlaggrad.ptr.p_double[i] = state2->stepklaggrad.ptr.p_double[i]; v = v+state2->d.ptr.p_double[i]*state2->stepklaggrad.ptr.p_double[i]; } if( ae_fp_greater_eq(v,(double)(0)) ) { /* * Non-descent direction D was specified; it may happen because LP subproblem favors * directions which decrease L1 penalty and default augmentation of Lagrangian involves * only L2 term. * * Append direction to the conjugacy constraints and retry direction generation. * * We make several retries with conjugate directions before giving up. */ if( dotrace ) { ae_trace("> LP subproblem suggested nondescent step, skipping it (dLag=%0.3e)\n", (double)(v)); } inc(&nondescentcnt, _state); if( nlcslp_nondescentlimit>0&&nondescentcnt>nlcslp_nondescentlimit ) { if( dotrace ) { ae_trace("> too many nondescent steps, stopping CG-like iterations\n"); } *status = 1; result = ae_false; return result; } goto lbl_4; } smoothnessmonitorstartlinesearch(smonitor, curx, curfi, curj, _state); stepknlagval = stepklagval; mcnfev = 0; mcstage = 0; stp = 1.0; mcsrch(n, &state2->stepkxn, &stepknlagval, &state2->stepknlaggrad, &state2->d, &stp, 1.0, nlcslp_slpgtol, &mcinfo, &mcnfev, &state2->tmp0, &state2->mcstate, &mcstage, _state); lbl_6: if( mcstage==0 ) { goto lbl_7; } nlcslp_slpsendx(state, &state2->stepkxn, _state); state->needfij = ae_true; state2->rphase2state.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state2->stepkfin, &state2->stepkjn, _state) ) { /* * Failed to retrieve func/Jac, infinities detected */ *status = 0; state->repterminationtype = -8; if( dotrace ) { ae_trace("[ERROR] infinities in target/constraints are detected\n"); } result = ae_false; return result; } smoothnessmonitorenqueuepoint(smonitor, &state2->d, stp, &state2->stepkxn, &state2->stepkfin, &state2->stepkjn, _state); nlcslp_lagrangianfg(state, &state2->stepkxn, state->trustrad, &state2->stepkfin, &state2->stepkjn, lagmult, &state2->tmplagrangianfg, &stepknlagval, &state2->stepknlaggrad, &state2->lastlcerr, &state2->lastlcidx, &state2->lastnlcerr, &state2->lastnlcidx, _state); mcsrch(n, &state2->stepkxn, &stepknlagval, &state2->stepknlaggrad, &state2->d, &stp, 1.0, nlcslp_slpgtol, &mcinfo, &mcnfev, &state2->tmp0, &state2->mcstate, &mcstage, _state); goto lbl_6; lbl_7: smoothnessmonitorfinalizelinesearch(smonitor, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state2->stepkxn.ptr.p_double[i] = ae_maxreal(state2->stepkxn.ptr.p_double[i], state->scaledbndl.ptr.p_double[i], _state); } if( state->hasbndu.ptr.p_bool[i] ) { state2->stepkxn.ptr.p_double[i] = ae_minreal(state2->stepkxn.ptr.p_double[i], state->scaledbndu.ptr.p_double[i], _state); } } if( mcinfo<=0 ) { /* * Line search failed miserably, terminate */ *status = 1; if( innerk==1 ) { /* * The very first iteration failed, really strange. * Let's decrease trust radius and try one more time. 
*/ state->trustrad = 0.5*state->trustrad; if( dotrace ) { ae_trace("> line search failed miserably for unknown reason, decreasing trust radius\n"); } if( ae_fp_less(state->trustrad,state->epsx) ) { state->repterminationtype = 2; *status = 0; if( dotrace ) { ae_trace("> stopping condition met: trust radius is smaller than %0.3e\n", (double)(state->epsx)); } } } else { /* * Well, it can be normal */ if( dotrace ) { ae_trace("> line search failed miserably for unknown reason, proceeding further\n"); } } result = ae_false; return result; } if( mcinfo==1 ) { nlcslp_lpsubproblemupdatehessian(state, &state->subsolver, curx, &state2->stepklaggrad, &state2->stepkxn, &state2->stepknlaggrad, _state); } /* * Update GammaMax - estimate of the function Hessian norm */ v = (double)(0); vv = (double)(0); mx = (double)(0); for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(state2->stepkxn.ptr.p_double[i]-curx->ptr.p_double[i], _state), _state); v = v+ae_sqr(state2->stepkxn.ptr.p_double[i]-curx->ptr.p_double[i], _state); vv = vv+(state2->stepkjn.ptr.pp_double[0][i]-curj->ptr.pp_double[0][i])*(state2->stepkxn.ptr.p_double[i]-curx->ptr.p_double[i]); } gammaprev = *gammamax; if( ae_fp_greater(mx,nlcslp_bfgstol) ) { *gammamax = ae_maxreal(*gammamax, ae_fabs(vv/v, _state), _state); } /* * Trace */ if( !dotrace ) { goto lbl_8; } if( !doprobing ) { goto lbl_10; } smoothnessmonitorstartprobing(smonitor, 1.0, 2, state->trustrad, _state); lbl_12: if( !smoothnessmonitorprobe(smonitor, _state) ) { goto lbl_13; } for(j=0; j<=n-1; j++) { state2->stepkxc.ptr.p_double[j] = curx->ptr.p_double[j]+smonitor->probingstp*state2->d.ptr.p_double[j]; if( state->hasbndl.ptr.p_bool[j] ) { state2->stepkxc.ptr.p_double[j] = ae_maxreal(state2->stepkxc.ptr.p_double[j], state->scaledbndl.ptr.p_double[j], _state); } if( state->hasbndu.ptr.p_bool[j] ) { state2->stepkxc.ptr.p_double[j] = ae_minreal(state2->stepkxc.ptr.p_double[j], state->scaledbndu.ptr.p_double[j], _state); } } nlcslp_slpsendx(state, &state2->stepkxc, _state); state->needfij = ae_true; state2->rphase2state.stage = 1; goto lbl_rcomm; lbl_1: state->needfij = ae_false; if( !nlcslp_slpretrievefij(state, &state2->stepkfic, &state2->stepkjc, _state) ) { goto lbl_13; } smonitor->probingf.ptr.p_double[0] = nlcslp_rawlagrangian(state, &state2->stepkxc, &state2->stepkfic, lagmult, &state2->tmpmerit, _state); smonitor->probingf.ptr.p_double[1] = state2->stepkfic.ptr.p_double[0]; goto lbl_12; lbl_13: ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | probing search direction suggested by LP subproblem |\n"); ae_trace("*** ------------------------------------------------------------\n"); ae_trace("*** | Step | Lagrangian (unaugmentd)| Target function |\n"); ae_trace("*** |along D| must be smooth | must be smooth |\n"); ae_trace("*** | | function | slope | function | slope |\n"); smoothnessmonitortraceprobingresults(smonitor, _state); lbl_10: mx = (double)(0); for(i=0; i<=n-1; i++) { mx = ae_maxreal(mx, ae_fabs(state2->d.ptr.p_double[i], _state)/state->trustrad, _state); } f0 = nlcslp_meritfunction(state, curx, curfi, &state2->meritlagmult, &state2->tmpmerit, _state); f1 = nlcslp_meritfunction(state, &state2->stepkxn, &state2->stepkfin, &state2->meritlagmult, &state2->tmpmerit, _state); ae_trace("> LP subproblem produced good direction, minimization was performed\n"); ae_trace("max(|Di|)/TrustRad = %0.6f\n", (double)(mx)); ae_trace("stp = %0.6f\n", (double)(stp)); if( dotracexd ) { ae_trace("X0 = "); tracevectorautoprec(curx, 0, n, _state); 
ae_trace("\n"); ae_trace("D = "); tracevectorautoprec(&state2->d, 0, n, _state); ae_trace("\n"); ae_trace("X1 = X0 + stp*D\n"); ae_trace(" = "); tracevectorautoprec(&state2->stepkxn, 0, n, _state); ae_trace("\n"); } ae_trace("meritF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(f0), (double)(f1), (double)(f1-f0)); ae_trace("scaled-targetF: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(curfi->ptr.p_double[0]), (double)(state2->stepkfin.ptr.p_double[0]), (double)(state2->stepkfin.ptr.p_double[0]-curfi->ptr.p_double[0])); ae_trace("aug.Lagrangian: %14.6e -> %14.6e (delta=%11.3e)\n", (double)(stepklagval), (double)(stepknlagval), (double)(stepknlagval-stepklagval)); if( ae_fp_greater(*gammamax,gammaprev) ) { ae_trace("|H| = %0.3e (Hessian norm increased)\n", (double)(*gammamax)); } lbl_8: /* * Check status of the termination request * Update current point * Update constraint status. * Report iteration. */ if( userterminationneeded ) { /* * User requested termination, break before we move to new point */ *status = 0; state->repterminationtype = 8; if( dotrace ) { ae_trace("# user requested termination\n"); } result = ae_false; return result; } nlcslp_slpcopystate(state, &state2->stepkxn, &state2->stepkfin, &state2->stepkjn, curx, curfi, curj, _state); state->replcerr = state2->lastlcerr; state->replcidx = state2->lastlcidx; state->repnlcerr = state2->lastnlcerr; state->repnlcidx = state2->lastnlcidx; inc(&state->repinneriterationscount, _state); nlcslp_slpsendx(state, curx, _state); state->f = curfi->ptr.p_double[0]*state->fscales.ptr.p_double[0]; state->xupdated = ae_true; state2->rphase2state.stage = 2; goto lbl_rcomm; lbl_2: state->xupdated = ae_false; /* * Terminate inner LP subiterations */ if( state->maxits>0&&state->repinneriterationscount>=state->maxits ) { /* * Iteration limit exhausted */ *status = 1; if( dotrace ) { ae_trace("# stopping criteria met (MaxIts iterations performed)\n"); } result = ae_false; return result; } if( ae_fp_greater_eq(stp,nlcslp_slpstpclosetoone) ) { /* * Step is close to 1.0, either of two is likely: * * we move through nearly linear region of F() * * we try to enforce some strongly violated constraint * * In any case, authors of the original algorithm recommend to break inner LP * iteration and proceed to test of sufficient decrease of merit function. */ *status = 1; if( dotrace ) { ae_trace("> step is close to 1, stopping iterations\n"); } result = ae_false; return result; } if( (mcinfo!=1&&mcinfo!=3)&&mcinfo!=5 ) { /* * Line search ended with "bad" MCINFO * (neither sufficient decrease, neither maximum step); * terminate. 
*/ *status = 1; if( dotrace ) { ae_trace("> line search ended with bad MCINFO, no more CG-like iterations\n"); } result = ae_false; return result; } lbl_4: innerk = innerk+1; goto lbl_3; lbl_5: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state2->rphase2state.ia.ptr.p_int[0] = n; state2->rphase2state.ia.ptr.p_int[1] = nslack; state2->rphase2state.ia.ptr.p_int[2] = nec; state2->rphase2state.ia.ptr.p_int[3] = nic; state2->rphase2state.ia.ptr.p_int[4] = nlec; state2->rphase2state.ia.ptr.p_int[5] = nlic; state2->rphase2state.ia.ptr.p_int[6] = mcinfo; state2->rphase2state.ia.ptr.p_int[7] = mcnfev; state2->rphase2state.ia.ptr.p_int[8] = mcstage; state2->rphase2state.ia.ptr.p_int[9] = i; state2->rphase2state.ia.ptr.p_int[10] = j; state2->rphase2state.ia.ptr.p_int[11] = innerk; state2->rphase2state.ia.ptr.p_int[12] = nondescentcnt; state2->rphase2state.ba.ptr.p_bool[0] = dotrace; state2->rphase2state.ba.ptr.p_bool[1] = doprobing; state2->rphase2state.ba.ptr.p_bool[2] = dotracexd; state2->rphase2state.ra.ptr.p_double[0] = stp; state2->rphase2state.ra.ptr.p_double[1] = v; state2->rphase2state.ra.ptr.p_double[2] = vv; state2->rphase2state.ra.ptr.p_double[3] = mx; state2->rphase2state.ra.ptr.p_double[4] = stepklagval; state2->rphase2state.ra.ptr.p_double[5] = stepknlagval; state2->rphase2state.ra.ptr.p_double[6] = gammaprev; state2->rphase2state.ra.ptr.p_double[7] = f0; state2->rphase2state.ra.ptr.p_double[8] = f1; return result; } /************************************************************************* Copies X to State.X *************************************************************************/ static void nlcslp_slpsendx(minslpstate* state, /* Real */ ae_vector* xs, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&xs->ptr.p_double[i]<=state->scaledbndl.ptr.p_double[i] ) { state->x.ptr.p_double[i] = state->scaledbndl.ptr.p_double[i]; continue; } if( state->hasbndu.ptr.p_bool[i]&&xs->ptr.p_double[i]>=state->scaledbndu.ptr.p_double[i] ) { state->x.ptr.p_double[i] = state->scaledbndu.ptr.p_double[i]; continue; } state->x.ptr.p_double[i] = xs->ptr.p_double[i]; } } /************************************************************************* Retrieves F-vector and scaled Jacobian, copies them to FiS and JS. Returns True on success, False on failure (when F or J are not finite numbers). *************************************************************************/ static ae_bool nlcslp_slpretrievefij(minslpstate* state, /* Real */ ae_vector* fis, /* Real */ ae_matrix* js, ae_state *_state) { ae_int_t nlec; ae_int_t nlic; ae_int_t n; ae_int_t i; ae_int_t j; double v; double vv; ae_bool result; n = state->n; nlec = state->nlec; nlic = state->nlic; v = (double)(0); for(i=0; i<=nlec+nlic; i++) { vv = 1/state->fscales.ptr.p_double[i]; fis->ptr.p_double[i] = vv*state->fi.ptr.p_double[i]; v = 0.1*v+fis->ptr.p_double[i]; for(j=0; j<=n-1; j++) { js->ptr.pp_double[i][j] = vv*state->j.ptr.pp_double[i][j]; v = 0.1*v+js->ptr.pp_double[i][j]; } } result = ae_isfinite(v, _state); return result; } /************************************************************************* Copies state (X point, Fi vector, J jacobian) to preallocated storage. 
*************************************************************************/ static void nlcslp_slpcopystate(minslpstate* state, /* Real */ ae_vector* x0, /* Real */ ae_vector* fi0, /* Real */ ae_matrix* j0, /* Real */ ae_vector* x1, /* Real */ ae_vector* fi1, /* Real */ ae_matrix* j1, ae_state *_state) { ae_int_t nlec; ae_int_t nlic; ae_int_t n; ae_int_t i; ae_int_t j; n = state->n; nlec = state->nlec; nlic = state->nlic; for(i=0; i<=n-1; i++) { x1->ptr.p_double[i] = x0->ptr.p_double[i]; } for(i=0; i<=nlec+nlic; i++) { fi1->ptr.p_double[i] = fi0->ptr.p_double[i]; for(j=0; j<=n-1; j++) { j1->ptr.pp_double[i][j] = j0->ptr.pp_double[i][j]; } } } /************************************************************************* This function calculates Lagrangian of the problem (in scaled variables): its value and gradient. Additionally it also estimates violation of linear constraints at the point as well as index of the most violated constraint *************************************************************************/ static void nlcslp_lagrangianfg(minslpstate* state, /* Real */ ae_vector* x, double trustrad, /* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* lagmult, minslptmplagrangian* tmp, double* f, /* Real */ ae_vector* g, double* lcerr, ae_int_t* lcidx, double* nlcerr, ae_int_t* nlcidx, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; double v; double vlag; double vact; double vd; double vviolate; ae_bool usesparsegemv; double dampingfactor; *f = 0; *lcerr = 0; *lcidx = 0; *nlcerr = 0; *nlcidx = 0; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; dampingfactor = nlcslp_inequalitydampingfactor/trustrad; /* * Prepare constraint violation report */ *lcerr = (double)(0); *lcidx = -1; *nlcerr = (double)(0); *nlcidx = -1; /* * Target function */ *f = fi->ptr.p_double[0]; for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = j->ptr.pp_double[0][i]; } /* * Lagrangian terms for linear constraints, constraint violations */ if( nec+nic>0 ) { usesparsegemv = state->subsolver.sparserawlc.ridx.ptr.p_int[nec+nic]<n*(nec+nic); rvectorsetlengthatleast(&tmp->sclagtmp0, ae_maxint(nec+nic, n, _state), _state); rvectorsetlengthatleast(&tmp->sclagtmp1, ae_maxint(nec+nic, n, _state), _state); if( usesparsegemv ) { sparsemv(&state->subsolver.sparserawlc, x, &tmp->sclagtmp0, _state); } else { rmatrixgemv(nec+nic, n, 1.0, &state->scaledcleic, 0, 0, 0, x, 0, 0.0, &tmp->sclagtmp0, 0, _state); } for(i=0; i<=nec+nic-1; i++) { /* * Estimate constraint value at the point, update violation info * * NOTE: here we expect that scaledCLEIC[] has normalized rows */ v = tmp->sclagtmp0.ptr.p_double[i]-state->scaledcleic.ptr.pp_double[i][n]; if( i<nec||v>0 ) { /* * Either equality constraint or violated inequality one. * Update violation report.
*/ vviolate = ae_fabs(v, _state); if( vviolate>*lcerr ) { *lcerr = vviolate; *lcidx = state->lcsrcidx.ptr.p_int[i]; } } /* * Prepare */ vlag = lagmult->ptr.p_double[i]; tmp->sclagtmp1.ptr.p_double[i] = (double)(0); /* * Primary Lagrangian term */ if( i<nec||v>0 ) { vact = v; vd = (double)(1); } else { vd = 1/(1-dampingfactor*v); vact = v*vd; vd = vd*vd; } *f = *f+vlag*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+vlag*vd; /* * Quadratic augmentation term */ if( i<nec||v>0 ) { vact = v; } else { vact = (double)(0); } *f = *f+0.5*nlcslp_augmentationfactor*vact*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+nlcslp_augmentationfactor*vact; } if( usesparsegemv ) { sparsemtv(&state->subsolver.sparserawlc, &tmp->sclagtmp1, &tmp->sclagtmp0, _state); for(i=0; i<=n-1; i++) { g->ptr.p_double[i] = g->ptr.p_double[i]+tmp->sclagtmp0.ptr.p_double[i]; } } else { rmatrixgemv(n, nec+nic, 1.0, &state->scaledcleic, 0, 0, 1, &tmp->sclagtmp1, 0, 1.0, g, 0, _state); } } /* * Lagrangian terms for nonlinear constraints */ rvectorsetlengthatleast(&tmp->sclagtmp1, nlec+nlic, _state); for(i=0; i<=nlec+nlic-1; i++) { v = fi->ptr.p_double[1+i]; if( i<nlec||v>0 ) { /* * Either equality constraint or violated inequality one. * Update violation report. */ vviolate = ae_fabs(v, _state)*state->fscales.ptr.p_double[1+i]; if( vviolate>*nlcerr ) { *nlcerr = vviolate; *nlcidx = i; } } vlag = lagmult->ptr.p_double[nec+nic+i]; tmp->sclagtmp1.ptr.p_double[i] = (double)(0); /* * Lagrangian term */ if( i<nlec||v>0 ) { vact = v; vd = (double)(1); } else { vd = 1/(1-dampingfactor*v); vact = v*vd; vd = vd*vd; } *f = *f+vlag*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+vlag*vd; /* * Augmentation term */ if( i<nlec||v>0 ) { vact = v; } else { vact = (double)(0); } *f = *f+0.5*nlcslp_augmentationfactor*vact*vact; tmp->sclagtmp1.ptr.p_double[i] = tmp->sclagtmp1.ptr.p_double[i]+nlcslp_augmentationfactor*vact; } rmatrixgemv(n, nlec+nlic, 1.0, j, 1, 0, 1, &tmp->sclagtmp1, 0, 1.0, g, 0, _state); } /************************************************************************* This function calculates L1-penalized merit function *************************************************************************/ static double nlcslp_meritfunction(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, ae_state *_state) { double tmp0; double tmp1; double result; nlcslp_meritfunctionandrawlagrangian(state, x, fi, lagmult, tmp, &tmp0, &tmp1, _state); result = tmp0; return result; } /************************************************************************* This function calculates raw (unaugmented and smooth) Lagrangian *************************************************************************/ static double nlcslp_rawlagrangian(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, ae_state *_state) { double tmp0; double tmp1; double result; nlcslp_meritfunctionandrawlagrangian(state, x, fi, lagmult, tmp, &tmp0, &tmp1, _state); result = tmp1; return result; } /************************************************************************* This function calculates L1-penalized merit function and raw (smooth and un-augmented) Lagrangian *************************************************************************/ static void nlcslp_meritfunctionandrawlagrangian(minslpstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* fi, /* Real */ ae_vector* lagmult, minslptmpmerit* tmp, double* meritf, double* rawlag,
ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t nlec; ae_int_t nlic; double v; *meritf = 0; *rawlag = 0; n = state->n; nec = state->nec; nic = state->nic; nlec = state->nlec; nlic = state->nlic; /* * Merit function and Lagrangian: primary term */ *meritf = fi->ptr.p_double[0]; *rawlag = fi->ptr.p_double[0]; /* * Merit function: augmentation and penalty for linear constraints */ rvectorsetlengthatleast(&tmp->mftmp0, nec+nic, _state); rmatrixgemv(nec+nic, n, 1.0, &state->scaledcleic, 0, 0, 0, x, 0, 0.0, &tmp->mftmp0, 0, _state); for(i=0; i<=nec+nic-1; i++) { v = tmp->mftmp0.ptr.p_double[i]-state->scaledcleic.ptr.pp_double[i][n]; if( i<nec ) { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcslp_augmentationfactor*ae_sqr(v, _state); *meritf = *meritf+nlcslp_meritfunctionbase*ae_fabs(v, _state)+nlcslp_meritfunctiongain*ae_fabs(lagmult->ptr.p_double[i], _state)*ae_fabs(v, _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[i]*v; } else { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcslp_augmentationfactor*ae_sqr(ae_maxreal(v, (double)(0), _state), _state); *meritf = *meritf+nlcslp_meritfunctionbase*ae_maxreal(v, (double)(0), _state)+nlcslp_meritfunctiongain*ae_maxreal(lagmult->ptr.p_double[i]*v, (double)(0), _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[i]*v; } } /* * Merit function: augmentation and penalty for nonlinear constraints */ for(i=0; i<=nlec+nlic-1; i++) { v = fi->ptr.p_double[1+i]; if( i<nlec ) { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcslp_augmentationfactor*ae_sqr(v, _state); *meritf = *meritf+nlcslp_meritfunctionbase*ae_fabs(v, _state)+nlcslp_meritfunctiongain*ae_fabs(lagmult->ptr.p_double[nec+nic+i]*v, _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[nec+nic+i]*v; } else { /* * Merit function: augmentation term + L1 penalty term */ *meritf = *meritf+0.5*nlcslp_augmentationfactor*ae_sqr(ae_maxreal(v, (double)(0), _state), _state); *meritf = *meritf+nlcslp_meritfunctionbase*ae_maxreal(v, (double)(0), _state)+nlcslp_meritfunctiongain*ae_maxreal(lagmult->ptr.p_double[nec+nic+i]*v, (double)(0), _state); /* * Raw Lagrangian */ *rawlag = *rawlag+lagmult->ptr.p_double[nec+nic+i]*v; } } } void _minslpsubsolver_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslpsubsolver *p = (minslpsubsolver*)_p; ae_touch_ptr((void*)p); _dualsimplexstate_init(&p->dss, _state, make_automatic); _dualsimplexsettings_init(&p->dsssettings, _state, make_automatic); _dualsimplexbasis_init(&p->lastbasis, _state, make_automatic); ae_matrix_init(&p->curd, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curb, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cural, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->curau, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparserawlc, _state, make_automatic); _sparsematrix_init(&p->sparseefflc, _state, make_automatic); ae_matrix_init(&p->h, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->curhd, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->densedummy, 0, 0, DT_REAL, _state, make_automatic); _sparsematrix_init(&p->sparsedummy, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic); } void _minslpsubsolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslpsubsolver *dst = (minslpsubsolver*)_dst; minslpsubsolver *src = (minslpsubsolver*)_src; _dualsimplexstate_init_copy(&dst->dss, &src->dss, _state, make_automatic); _dualsimplexsettings_init_copy(&dst->dsssettings,
_state, make_automatic); _dualsimplexbasis_init_copy(&dst->lastbasis, &src->lastbasis, _state, make_automatic); dst->basispresent = src->basispresent; ae_matrix_init_copy(&dst->curd, &src->curd, _state, make_automatic); dst->curdcnt = src->curdcnt; ae_vector_init_copy(&dst->curb, &src->curb, _state, make_automatic); ae_vector_init_copy(&dst->curbndl, &src->curbndl, _state, make_automatic); ae_vector_init_copy(&dst->curbndu, &src->curbndu, _state, make_automatic); ae_vector_init_copy(&dst->cural, &src->cural, _state, make_automatic); ae_vector_init_copy(&dst->curau, &src->curau, _state, make_automatic); _sparsematrix_init_copy(&dst->sparserawlc, &src->sparserawlc, _state, make_automatic); _sparsematrix_init_copy(&dst->sparseefflc, &src->sparseefflc, _state, make_automatic); dst->hessiantype = src->hessiantype; ae_matrix_init_copy(&dst->h, &src->h, _state, make_automatic); ae_matrix_init_copy(&dst->curhd, &src->curhd, _state, make_automatic); ae_matrix_init_copy(&dst->densedummy, &src->densedummy, _state, make_automatic); _sparsematrix_init_copy(&dst->sparsedummy, &src->sparsedummy, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_vector_init_copy(&dst->sk, &src->sk, _state, make_automatic); ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic); } void _minslpsubsolver_clear(void* _p) { minslpsubsolver *p = (minslpsubsolver*)_p; ae_touch_ptr((void*)p); _dualsimplexstate_clear(&p->dss); _dualsimplexsettings_clear(&p->dsssettings); _dualsimplexbasis_clear(&p->lastbasis); ae_matrix_clear(&p->curd); ae_vector_clear(&p->curb); ae_vector_clear(&p->curbndl); ae_vector_clear(&p->curbndu); ae_vector_clear(&p->cural); ae_vector_clear(&p->curau); _sparsematrix_clear(&p->sparserawlc); _sparsematrix_clear(&p->sparseefflc); ae_matrix_clear(&p->h); ae_matrix_clear(&p->curhd); ae_matrix_clear(&p->densedummy); _sparsematrix_clear(&p->sparsedummy); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_vector_clear(&p->sk); ae_vector_clear(&p->yk); } void _minslpsubsolver_destroy(void* _p) { minslpsubsolver *p = (minslpsubsolver*)_p; ae_touch_ptr((void*)p); _dualsimplexstate_destroy(&p->dss); _dualsimplexsettings_destroy(&p->dsssettings); _dualsimplexbasis_destroy(&p->lastbasis); ae_matrix_destroy(&p->curd); ae_vector_destroy(&p->curb); ae_vector_destroy(&p->curbndl); ae_vector_destroy(&p->curbndu); ae_vector_destroy(&p->cural); ae_vector_destroy(&p->curau); _sparsematrix_destroy(&p->sparserawlc); _sparsematrix_destroy(&p->sparseefflc); ae_matrix_destroy(&p->h); ae_matrix_destroy(&p->curhd); ae_matrix_destroy(&p->densedummy); _sparsematrix_destroy(&p->sparsedummy); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_vector_destroy(&p->sk); ae_vector_destroy(&p->yk); } void _minslptmplagrangian_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslptmplagrangian *p = (minslptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->sclagtmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->sclagtmp1, 0, DT_REAL, _state, make_automatic); } void _minslptmplagrangian_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslptmplagrangian *dst = (minslptmplagrangian*)_dst; minslptmplagrangian *src = (minslptmplagrangian*)_src; ae_vector_init_copy(&dst->sclagtmp0, &src->sclagtmp0, _state, make_automatic); ae_vector_init_copy(&dst->sclagtmp1, &src->sclagtmp1, _state, make_automatic); } void _minslptmplagrangian_clear(void* _p) { 
minslptmplagrangian *p = (minslptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->sclagtmp0); ae_vector_clear(&p->sclagtmp1); } void _minslptmplagrangian_destroy(void* _p) { minslptmplagrangian *p = (minslptmplagrangian*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->sclagtmp0); ae_vector_destroy(&p->sclagtmp1); } void _minslptmpmerit_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslptmpmerit *p = (minslptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->mftmp0, 0, DT_REAL, _state, make_automatic); } void _minslptmpmerit_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslptmpmerit *dst = (minslptmpmerit*)_dst; minslptmpmerit *src = (minslptmpmerit*)_src; ae_vector_init_copy(&dst->mftmp0, &src->mftmp0, _state, make_automatic); } void _minslptmpmerit_clear(void* _p) { minslptmpmerit *p = (minslptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->mftmp0); } void _minslptmpmerit_destroy(void* _p) { minslptmpmerit *p = (minslptmpmerit*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->mftmp0); } void _minslpphase13state_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslpphase13state *p = (minslpphase13state*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkxc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkxn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfic, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfin, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjc, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjn, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dummylagmult, 0, DT_REAL, _state, make_automatic); _minslptmpmerit_init(&p->tmpmerit, _state, make_automatic); _rcommstate_init(&p->rphase13state, _state, make_automatic); } void _minslpphase13state_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslpphase13state *dst = (minslpphase13state*)_dst; minslpphase13state *src = (minslpphase13state*)_src; dst->usecorrection = src->usecorrection; ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->dx, &src->dx, _state, make_automatic); ae_vector_init_copy(&dst->stepkxc, &src->stepkxc, _state, make_automatic); ae_vector_init_copy(&dst->stepkxn, &src->stepkxn, _state, make_automatic); ae_vector_init_copy(&dst->stepkfic, &src->stepkfic, _state, make_automatic); ae_vector_init_copy(&dst->stepkfin, &src->stepkfin, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjc, &src->stepkjc, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjn, &src->stepkjn, _state, make_automatic); ae_vector_init_copy(&dst->dummylagmult, &src->dummylagmult, _state, make_automatic); _minslptmpmerit_init_copy(&dst->tmpmerit, &src->tmpmerit, _state, make_automatic); _rcommstate_init_copy(&dst->rphase13state, &src->rphase13state, _state, make_automatic); } void _minslpphase13state_clear(void* _p) { minslpphase13state *p = (minslpphase13state*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->d); ae_vector_clear(&p->dx); ae_vector_clear(&p->stepkxc); ae_vector_clear(&p->stepkxn); ae_vector_clear(&p->stepkfic); ae_vector_clear(&p->stepkfin); ae_matrix_clear(&p->stepkjc); ae_matrix_clear(&p->stepkjn); ae_vector_clear(&p->dummylagmult); _minslptmpmerit_clear(&p->tmpmerit); _rcommstate_clear(&p->rphase13state); } void _minslpphase13state_destroy(void* _p) { 
minslpphase13state *p = (minslpphase13state*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->d); ae_vector_destroy(&p->dx); ae_vector_destroy(&p->stepkxc); ae_vector_destroy(&p->stepkxn); ae_vector_destroy(&p->stepkfic); ae_vector_destroy(&p->stepkfin); ae_matrix_destroy(&p->stepkjc); ae_matrix_destroy(&p->stepkjn); ae_vector_destroy(&p->dummylagmult); _minslptmpmerit_destroy(&p->tmpmerit); _rcommstate_destroy(&p->rphase13state); } void _minslpphase2state_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslpphase2state *p = (minslpphase2state*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->stepkxn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkxc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfin, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfic, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjn, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkjc, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepklaggrad, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepknlaggrad, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepknlagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->meritlagmult, 0, DT_REAL, _state, make_automatic); _minslptmplagrangian_init(&p->tmplagrangianfg, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); _linminstate_init(&p->mcstate, _state, make_automatic); _minslptmpmerit_init(&p->tmpmerit, _state, make_automatic); _rcommstate_init(&p->rphase2state, _state, make_automatic); } void _minslpphase2state_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslpphase2state *dst = (minslpphase2state*)_dst; minslpphase2state *src = (minslpphase2state*)_src; ae_vector_init_copy(&dst->stepkxn, &src->stepkxn, _state, make_automatic); ae_vector_init_copy(&dst->stepkxc, &src->stepkxc, _state, make_automatic); ae_vector_init_copy(&dst->stepkfin, &src->stepkfin, _state, make_automatic); ae_vector_init_copy(&dst->stepkfic, &src->stepkfic, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjn, &src->stepkjn, _state, make_automatic); ae_matrix_init_copy(&dst->stepkjc, &src->stepkjc, _state, make_automatic); ae_vector_init_copy(&dst->stepklaggrad, &src->stepklaggrad, _state, make_automatic); ae_vector_init_copy(&dst->stepknlaggrad, &src->stepknlaggrad, _state, make_automatic); ae_vector_init_copy(&dst->stepknlagmult, &src->stepknlagmult, _state, make_automatic); ae_vector_init_copy(&dst->meritlagmult, &src->meritlagmult, _state, make_automatic); _minslptmplagrangian_init_copy(&dst->tmplagrangianfg, &src->tmplagrangianfg, _state, make_automatic); dst->lastlcerr = src->lastlcerr; dst->lastlcidx = src->lastlcidx; dst->lastnlcerr = src->lastnlcerr; dst->lastnlcidx = src->lastnlcidx; ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); _linminstate_init_copy(&dst->mcstate, &src->mcstate, _state, make_automatic); _minslptmpmerit_init_copy(&dst->tmpmerit, &src->tmpmerit, _state, make_automatic); _rcommstate_init_copy(&dst->rphase2state, &src->rphase2state, _state, make_automatic); } void _minslpphase2state_clear(void* _p) { minslpphase2state *p = (minslpphase2state*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->stepkxn); ae_vector_clear(&p->stepkxc); ae_vector_clear(&p->stepkfin); ae_vector_clear(&p->stepkfic); ae_matrix_clear(&p->stepkjn); 
ae_matrix_clear(&p->stepkjc); ae_vector_clear(&p->stepklaggrad); ae_vector_clear(&p->stepknlaggrad); ae_vector_clear(&p->stepknlagmult); ae_vector_clear(&p->meritlagmult); _minslptmplagrangian_clear(&p->tmplagrangianfg); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->d); _linminstate_clear(&p->mcstate); _minslptmpmerit_clear(&p->tmpmerit); _rcommstate_clear(&p->rphase2state); } void _minslpphase2state_destroy(void* _p) { minslpphase2state *p = (minslpphase2state*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->stepkxn); ae_vector_destroy(&p->stepkxc); ae_vector_destroy(&p->stepkfin); ae_vector_destroy(&p->stepkfic); ae_matrix_destroy(&p->stepkjn); ae_matrix_destroy(&p->stepkjc); ae_vector_destroy(&p->stepklaggrad); ae_vector_destroy(&p->stepknlaggrad); ae_vector_destroy(&p->stepknlagmult); ae_vector_destroy(&p->meritlagmult); _minslptmplagrangian_destroy(&p->tmplagrangianfg); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->d); _linminstate_destroy(&p->mcstate); _minslptmpmerit_destroy(&p->tmpmerit); _rcommstate_destroy(&p->rphase2state); } void _minslpstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minslpstate *p = (minslpstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->scaledcleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->lcsrcidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->scaledbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->scaledbndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic); _minslpphase13state_init(&p->state13, _state, make_automatic); _minslpphase2state_init(&p->state2, _state, make_automatic); ae_vector_init(&p->step0x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->backupx, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->step0fi, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->stepkfi, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->backupfi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->step0j, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->stepkj, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->backupj, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->meritlagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dummylagmult, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fscales, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->meritfunctionhistory, 0, DT_REAL, _state, make_automatic); _minslpsubsolver_init(&p->subsolver, _state, make_automatic); _minslptmpmerit_init(&p->tmpmerit, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); } void _minslpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minslpstate *dst = (minslpstate*)_dst; minslpstate *src = (minslpstate*)_src; dst->n = src->n; dst->nec = src->nec; dst->nic = src->nic; dst->nlec = src->nlec; dst->nlic = src->nlic; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_matrix_init_copy(&dst->scaledcleic, &src->scaledcleic, _state, make_automatic); ae_vector_init_copy(&dst->lcsrcidx, &src->lcsrcidx, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, 
&src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndl, &src->scaledbndl, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndu, &src->scaledbndu, _state, make_automatic); dst->epsx = src->epsx; dst->maxits = src->maxits; dst->hessiantype = src->hessiantype; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic); ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic); dst->f = src->f; dst->needfij = src->needfij; dst->xupdated = src->xupdated; _minslpphase13state_init_copy(&dst->state13, &src->state13, _state, make_automatic); _minslpphase2state_init_copy(&dst->state2, &src->state2, _state, make_automatic); dst->trustrad = src->trustrad; dst->lpfailurecnt = src->lpfailurecnt; dst->fstagnationcnt = src->fstagnationcnt; ae_vector_init_copy(&dst->step0x, &src->step0x, _state, make_automatic); ae_vector_init_copy(&dst->stepkx, &src->stepkx, _state, make_automatic); ae_vector_init_copy(&dst->backupx, &src->backupx, _state, make_automatic); ae_vector_init_copy(&dst->step0fi, &src->step0fi, _state, make_automatic); ae_vector_init_copy(&dst->stepkfi, &src->stepkfi, _state, make_automatic); ae_vector_init_copy(&dst->backupfi, &src->backupfi, _state, make_automatic); ae_matrix_init_copy(&dst->step0j, &src->step0j, _state, make_automatic); ae_matrix_init_copy(&dst->stepkj, &src->stepkj, _state, make_automatic); ae_matrix_init_copy(&dst->backupj, &src->backupj, _state, make_automatic); ae_vector_init_copy(&dst->meritlagmult, &src->meritlagmult, _state, make_automatic); ae_vector_init_copy(&dst->dummylagmult, &src->dummylagmult, _state, make_automatic); ae_vector_init_copy(&dst->fscales, &src->fscales, _state, make_automatic); ae_vector_init_copy(&dst->meritfunctionhistory, &src->meritfunctionhistory, _state, make_automatic); dst->historylen = src->historylen; _minslpsubsolver_init_copy(&dst->subsolver, &src->subsolver, _state, make_automatic); _minslptmpmerit_init_copy(&dst->tmpmerit, &src->tmpmerit, _state, make_automatic); dst->repsimplexiterations = src->repsimplexiterations; dst->repsimplexiterations1 = src->repsimplexiterations1; dst->repsimplexiterations2 = src->repsimplexiterations2; dst->repsimplexiterations3 = src->repsimplexiterations3; dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repterminationtype = src->repterminationtype; dst->repbcerr = src->repbcerr; dst->repbcidx = src->repbcidx; dst->replcerr = src->replcerr; dst->replcidx = src->replcidx; dst->repnlcerr = src->repnlcerr; dst->repnlcidx = src->repnlcidx; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); } void _minslpstate_clear(void* _p) { minslpstate *p = (minslpstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_matrix_clear(&p->scaledcleic); ae_vector_clear(&p->lcsrcidx); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->scaledbndl); ae_vector_clear(&p->scaledbndu); ae_vector_clear(&p->x); ae_vector_clear(&p->fi); ae_matrix_clear(&p->j); _minslpphase13state_clear(&p->state13); _minslpphase2state_clear(&p->state2); ae_vector_clear(&p->step0x); ae_vector_clear(&p->stepkx); ae_vector_clear(&p->backupx); ae_vector_clear(&p->step0fi); ae_vector_clear(&p->stepkfi); ae_vector_clear(&p->backupfi); ae_matrix_clear(&p->step0j); ae_matrix_clear(&p->stepkj); ae_matrix_clear(&p->backupj); 
ae_vector_clear(&p->meritlagmult); ae_vector_clear(&p->dummylagmult); ae_vector_clear(&p->fscales); ae_vector_clear(&p->meritfunctionhistory); _minslpsubsolver_clear(&p->subsolver); _minslptmpmerit_clear(&p->tmpmerit); _rcommstate_clear(&p->rstate); } void _minslpstate_destroy(void* _p) { minslpstate *p = (minslpstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_matrix_destroy(&p->scaledcleic); ae_vector_destroy(&p->lcsrcidx); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->scaledbndl); ae_vector_destroy(&p->scaledbndu); ae_vector_destroy(&p->x); ae_vector_destroy(&p->fi); ae_matrix_destroy(&p->j); _minslpphase13state_destroy(&p->state13); _minslpphase2state_destroy(&p->state2); ae_vector_destroy(&p->step0x); ae_vector_destroy(&p->stepkx); ae_vector_destroy(&p->backupx); ae_vector_destroy(&p->step0fi); ae_vector_destroy(&p->stepkfi); ae_vector_destroy(&p->backupfi); ae_matrix_destroy(&p->step0j); ae_matrix_destroy(&p->stepkj); ae_matrix_destroy(&p->backupj); ae_vector_destroy(&p->meritlagmult); ae_vector_destroy(&p->dummylagmult); ae_vector_destroy(&p->fscales); ae_vector_destroy(&p->meritfunctionhistory); _minslpsubsolver_destroy(&p->subsolver); _minslptmpmerit_destroy(&p->tmpmerit); _rcommstate_destroy(&p->rstate); } #endif #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD) /************************************************************************* NONLINEARLY CONSTRAINED OPTIMIZATION WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints * nonlinear equality constraints Gi(x)=0 * nonlinear inequality constraints Hi(x)<=0 REQUIREMENTS: * user must provide function value and gradient for F(), H(), G() * starting point X0 must be feasible or not too far away from the feasible set * F(), G(), H() are continuously differentiable on the feasible set and its neighborhood * nonlinear constraints G() and H() must have non-zero gradient at G(x)=0 and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is NOT supported. USAGE: Constrained optimization is far more complex than the unconstrained one. Nonlinearly constrained optimization is one of the most esoteric numerical procedures. Here we give a very brief outline of the MinNLC optimizer. We strongly recommend you to study examples in the ALGLIB Reference Manual and to read ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinNLCCreate() call and chooses what NLC solver to use. There is some solver which is used by default, with default settings, but you should NOT rely on default choice. It may change in future releases of ALGLIB without notice, and no one can guarantee that new solver will be able to solve your problem with default settings. On the other hand, if you choose solver explicitly, you can be pretty sure that it will work with new ALGLIB releases. In the current release following solvers can be used: * SQP solver, recommended for medium-scale problems (less than thousand of variables) with hard-to-evaluate target functions. Requires less function evaluations than other solvers but each step involves solution of QP subproblem, so running time may be higher than that of AUL (another recommended option). Activated with minnlcsetalgosqp() function.
* AUL solver with dense preconditioner, recommended for large-scale problems or for problems with cheap target function. Needs more function evaluations than SQP (about 5x-10x more), but its iterations are much cheaper than those of SQP. Activated with minnlcsetalgoaul() function. * SLP solver, successive linear programming. The slowest one, requires more target function evaluations than SQP and AUL. However, it is somewhat more robust in tricky cases, so it can be used as a backup plan. Activated with minnlcsetalgoslp() function. 2. [optional] user activates OptGuard integrity checker which tries to detect possible errors in the user-supplied callbacks: * discontinuity/nonsmoothness of the target/nonlinear constraints * errors in the analytic gradient provided by user This feature is essential for early prototyping stages because it helps to catch common coding and problem statement errors. OptGuard can be activated with the following functions (one per each check performed): * minnlcoptguardsmoothness() * minnlcoptguardgradient() 3. User adds boundary and/or linear and/or nonlinear constraints by means of calling one of the following functions: a) minnlcsetbc() for boundary constraints b) minnlcsetlc() for linear constraints c) minnlcsetnlc() for nonlinear constraints You may combine (a), (b) and (c) in one optimization problem. 4. User sets scale of the variables with minnlcsetscale() function. It is VERY important to set scale of the variables, because nonlinearly constrained problems are hard to solve when variables are badly scaled. 5. User sets stopping conditions with minnlcsetcond(). If NLC solver uses inner/outer iteration layout, this function sets stopping conditions for INNER iterations. 6. Finally, user calls minnlcoptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G/H. 7. User calls minnlcresults() to get solution; additionally you can retrieve OptGuard report with minnlcoptguardresults(), and get detailed report about purported errors in the target function with: * minnlcoptguardnonc1test0results() * minnlcoptguardnonc1test1results() 8. Optionally user may call minnlcrestartfrom() to solve another problem with same N but another starting point. minnlcrestartfrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlccreate(ae_int_t n, /* Real */ ae_vector* x, minnlcstate* state, ae_state *_state) { _minnlcstate_clear(state); ae_assert(n>=1, "MinNLCCreate: N<1", _state); ae_assert(x->cnt>=n, "MinNLCCreate: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinNLCCreate: X contains infinite or NaN values!", _state); minnlc_minnlcinitinternal(n, x, 0.0, state, _state); } /************************************************************************* This subroutine is a finite difference variant of MinNLCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinNLCCreate() in order to get more information about creation of NLC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation.
2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinNLCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large TRUNCATION errors, while too small step will result in too large NUMERICAL errors. 1.0E-4 can be a good value to start from. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and less precise. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small-dimensional problems only, and to implement analytical gradient as soon as possible. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlccreatef(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnlcstate* state, ae_state *_state) { _minnlcstate_clear(state); ae_assert(n>=1, "MinNLCCreateF: N<1", _state); ae_assert(x->cnt>=n, "MinNLCCreateF: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinNLCCreateF: X contains infinite or NaN values!", _state); ae_assert(ae_isfinite(diffstep, _state), "MinNLCCreateF: DiffStep is infinite or NaN!", _state); ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinNLCCreateF: DiffStep is non-positive!", _state); minnlc_minnlcinitinternal(n, x, diffstep, state, _state); } /************************************************************************* This function sets boundary constraints for NLC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinNLCRestartFrom(). You may combine boundary constraints with general linear ones - and with nonlinear ones! Boundary constraints are handled more efficiently than other constraint types, so if your problem has mixed constraints, you may explicitly specify some of them as boundary ones. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: when you solve your problem with augmented Lagrangian solver, boundary constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area! -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetbc(minnlcstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; ae_assert(bndl->cnt>=n, "MinNLCSetBC: Length(BndL)<N!", _state); ae_assert(bndu->cnt>=n, "MinNLCSetBC: Length(BndU)<N!", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinNLCSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinNLCSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } } /************************************************************************* This function sets linear constraints for MinNLC optimizer. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinNLCRestartFrom(). You may combine linear constraints with boundary ones - and with nonlinear ones! If your problem has mixed constraints, you may explicitly specify some of them as linear. It may help optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE 1: when you solve your problem with augmented Lagrangian solver, linear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area!
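EXAMPLE (an illustrative sketch; it uses the C++ interface wrapper and its string-initialized array types, which are assumed here rather than defined in this unit). To impose x0+x1<=2 together with x0-x1=0 over x=(x0,x1), one may write:

    alglib::real_2d_array    c  = "[[1,1,2],[1,-1,0]]";   // each row: N coefficients, then right part
    alglib::integer_1d_array ct = "[-1,0]";               // -1 means "<=", 0 means "="
    alglib::minnlcsetlc(state, c, ct);

Here state is a previously created minnlcstate object; all names and values above are purely illustrative.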
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetlc(minnlcstate* state, /* Real */ ae_matrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_int_t n; ae_int_t i; n = state->n; /* * First, check for errors in the inputs */ ae_assert(k>=0, "MinNLCSetLC: K<0", _state); ae_assert(c->cols>=n+1||k==0, "MinNLCSetLC: Cols(C)<N+1", _state); ae_assert(c->rows>=k, "MinNLCSetLC: Rows(C)<K", _state); ae_assert(ct->cnt>=k, "MinNLCSetLC: Length(CT)<K", _state); ae_assert(apservisfinitematrix(c, k, n+1, _state), "MinNLCSetLC: C contains infinite or NaN values!", _state); /* * Handle zero K */ if( k==0 ) { state->nec = 0; state->nic = 0; return; } /* * Equality constraints are stored first, in the upper * NEC rows of State.CLEIC matrix. Inequality constraints * are stored in the next NIC rows. * * NOTE: we convert inequality constraints to the form * A*x<=b before copying them. */ rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); ivectorsetlengthatleast(&state->lcsrcidx, k, _state); state->nec = 0; state->nic = 0; for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]==0 ) { ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); state->lcsrcidx.ptr.p_int[state->nec] = i; state->nec = state->nec+1; } } for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]!=0 ) { if( ct->ptr.p_int[i]>0 ) { ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } else { ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } state->lcsrcidx.ptr.p_int[state->nec+state->nic] = i; state->nic = state->nic+1; } } } /************************************************************************* This function sets nonlinear constraints for MinNLC optimizer. In fact, this function sets NUMBER of nonlinear constraints. Constraints themselves (constraint functions) are passed to the MinNLCOptimize() method. This method requires user-defined vector function F[] and its Jacobian J[], where: * first component of F[] and first row of Jacobian J[] corresponds to function being minimized * next NLEC components of F[] (and rows of J) correspond to nonlinear equality constraints G_i(x)=0 * next NLIC components of F[] (and rows of J) correspond to nonlinear inequality constraints H_i(x)<=0 NOTE: you may combine nonlinear constraints with linear/boundary ones. If your problem has mixed constraints, you may explicitly specify some of them as linear ones. It may help optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. NLEC - number of Non-Linear Equality Constraints (NLEC), >=0 NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0 NOTE 1: when you solve your problem with augmented Lagrangian solver, nonlinear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of feasible area! NOTE 2: algorithm scales variables according to scale specified by MinNLCSetScale() function, so it can handle problems with badly scaled variables (as long as we KNOW their scales). However, there is no way to automatically scale nonlinear constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may ruin convergence. Solving problem with constraint "1000*G0(x)=0" is NOT the same as solving it with constraint "0.001*G0(x)=0". It means that YOU are the one who is responsible for correct scaling of nonlinear constraints Gi(x) and Hi(x).
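For illustration (a hedged sketch, not a prescription): if your natural formulation of a constraint is 1000000*G0(x)=0, prefer to return a rescaled value from your callback so that the constraint gradient has roughly unit magnitude, e.g.

    // hypothetical callback fragment; g0() and dg0_dx0() are placeholders for user code
    fi[1]     = g0(x);         // instead of 1000000*g0(x)
    jac[1][0] = dg0_dx0(x);    // scale the Jacobian row consistently with fi[1]
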
We recommend you to scale nonlinear constraints in such way that I-th component of dG/dX (or dH/dx) has approximately unit magnitude (for problems with unit scale) or has magnitude approximately equal to 1/S[i] (where S is a scale set by MinNLCSetScale() function). -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetnlc(minnlcstate* state, ae_int_t nlec, ae_int_t nlic, ae_state *_state) { ae_assert(nlec>=0, "MinNLCSetNLC: NLEC<0", _state); ae_assert(nlic>=0, "MinNLCSetNLC: NLIC<0", _state); state->ng = nlec; state->nh = nlic; ae_vector_set_length(&state->fi, 1+state->ng+state->nh, _state); ae_matrix_set_length(&state->j, 1+state->ng+state->nh, state->n, _state); } /************************************************************************* This function sets stopping conditions for inner iterations of optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinNLCSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic selection of the stopping condition. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetcond(minnlcstate* state, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsx, _state), "MinNLCSetCond: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinNLCSetCond: negative EpsX", _state); ae_assert(maxits>=0, "MinNLCSetCond: negative MaxIts!", _state); if( ae_fp_eq(epsx,(double)(0))&&maxits==0 ) { epsx = 1.0E-8; } state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function sets scaling coefficients for NLC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetscale(minnlcstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->n, "MinNLCSetScale: Length(S)<N", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinNLCSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinNLCSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* This function sets preconditioner to "inexact LBFGS-based" mode.
Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). Inexact LBFGS-based preconditioner uses L-BFGS formula combined with orthogonality assumption to perform very fast updates. For a N-dimensional problem with K general linear or nonlinear constraints (boundary ones are not counted) it has O(N*K) cost per iteration. This preconditioner has best quality (less iterations) when general linear and nonlinear constraints are orthogonal to each other (orthogonality with respect to boundary constraints is not required). Number of iterations increases when constraints are non-orthogonal, because algorithm assumes orthogonality, but still it is better than no preconditioner at all. INPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecinexact(minnlcstate* state, ae_state *_state) { state->updatefreq = 0; state->prectype = 1; } /************************************************************************* This function sets preconditioner to "exact low rank" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). It also provides special unpreconditioned mode of operation which can be used for test purposes. Comments below discuss low rank preconditioner. Exact low-rank preconditioner uses Woodbury matrix identity to build quadratic model of the penalized function. It has following features: * no special assumptions about orthogonality of constraints * preconditioner evaluation is optimized for K<=N. * finally, stability of the process is guaranteed only for K<=N due to degeneracy of intermediate matrices. That's why we recommend to use "exact robust" preconditioner for such cases. RECOMMENDATIONS We recommend to choose between "exact low rank" and "exact robust" preconditioners, with "low rank" version being chosen when you know in advance that total count of non-box constraints won't exceed N, and "robust" version being chosen when you need bulletproof solution. INPUT PARAMETERS: State - structure stores algorithm state UpdateFreq- update frequency. Preconditioner is rebuilt after every UpdateFreq iterations. Recommended value: 10 or higher. Zero value means that good default value will be used. 
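A minimal selection sketch (C++ interface wrapper assumed; the value 10 is just an example):

    alglib::minnlcsetprecexactlowrank(state, 10);   // rebuild preconditioner every 10 iterations
    // alglib::minnlcsetprecinexact(state);         // alternative: inexact LBFGS-based mode
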
-- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecexactlowrank(minnlcstate* state, ae_int_t updatefreq, ae_state *_state) { ae_assert(updatefreq>=0, "MinNLCSetPrecExactLowRank: UpdateFreq<0", _state); if( updatefreq==0 ) { updatefreq = 10; } state->prectype = 2; state->updatefreq = updatefreq; } /************************************************************************* This function sets preconditioner to "exact robust" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may use following preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, with O(N*K) evaluation time b) exact low rank one, with O(N*K^2) evaluation time c) exact robust one, with O(N^3+K*N^2) evaluation time where K is a total number of general linear and nonlinear constraints (box ones are not counted). It also provides special unpreconditioned mode of operation which can be used for test purposes. Comments below discuss robust preconditioner. Exact robust preconditioner uses Cholesky decomposition to invert approximate Hessian matrix H=D+W'*C*W (where D stands for diagonal terms of Hessian, combined result of initial scaling matrix and penalty from box constraints; W stands for general linear constraints and linearization of nonlinear ones; C stands for diagonal matrix of penalty coefficients). This preconditioner has following features: * no special assumptions about constraint structure * preconditioner is optimized for stability; unlike "exact low rank" version which fails for K>=N, this one works well for any value of K. * the only drawback is that it takes O(N^3+K*N^2) time to build it. No economical Woodbury update is applied even when it makes sense, thus there exist situations (K<<N) when "exact low rank" preconditioner outperforms this one. RECOMMENDATIONS We recommend to choose between "exact low rank" and "exact robust" preconditioners, with "low rank" version being chosen when you know in advance that total count of non-box constraints won't exceed N, and "robust" version being chosen when you need bulletproof solution. INPUT PARAMETERS: State - structure stores algorithm state UpdateFreq- update frequency. Preconditioner is rebuilt after every UpdateFreq iterations. Recommended value: 10 or higher. Zero value means that good default value will be used. -- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecexactrobust(minnlcstate* state, ae_int_t updatefreq, ae_state *_state) { ae_assert(updatefreq>=0, "MinNLCSetPrecExactRobust: UpdateFreq<0", _state); if( updatefreq==0 ) { updatefreq = 10; } state->prectype = 3; state->updatefreq = updatefreq; } /************************************************************************* This function sets preconditioner to "turned off" mode. Preconditioning is very important for convergence of Augmented Lagrangian algorithm because presence of penalty term makes problem ill-conditioned. Difference between performance of preconditioned and unpreconditioned methods can be as large as 100x! MinNLC optimizer may utilize two preconditioners, each with its own benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one. It also provides special unpreconditioned mode of operation which can be used for test purposes. This function activates this test mode. Do not use it in production code to solve real-life problems. INPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 26.09.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetprecnone(minnlcstate* state, ae_state *_state) { state->updatefreq = 0; state->prectype = 0; } /************************************************************************* This function sets maximum step length (after scaling of step vector with respect to variable scales specified by minnlcsetscale() call). INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0.
Set StpMax to 0.0 (default), if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. NOTE: different solvers employed by MinNLC optimizer use different norms for step; AUL solver uses 2-norm, whilst SLP solver uses INF-norm. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minnlcsetstpmax(minnlcstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinNLCSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinNLCSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* This function tells MinNLC unit to use Augmented Lagrangian algorithm for nonlinearly constrained optimization. This algorithm is a slight modification of one described in "A Modified Barrier-Augmented Lagrangian Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK, K. SCHEINBERG, I.YUZEFOVICH. AUL solver can be significantly faster than SQP on easy problems due to cheaper iterations, although it needs more function evaluations. Augmented Lagrangian algorithm works by converting problem of minimizing F(x) subject to equality/inequality constraints to unconstrained problem of the form min[ f(x) + + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) + + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ] where: * Rho is a fixed penalization coefficient * PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce equality constraints * SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune" equality constraints, greatly increasing precision * PENALTY_INEQ(x) is a penalty term which is used to approximately enforce inequality constraints * SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune" inequality constraints, greatly increasing precision * Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during outer iterations of algorithm This version of AUL algorithm uses preconditioner, which greatly accelerates convergence. Because this algorithm is similar to penalty methods, it may perform steps into infeasible area. All kinds of constraints (boundary, linear and nonlinear ones) may be violated in intermediate points - and in the solution. However, properly configured AUL method is significantly better at handling constraints than barrier and/or penalty methods. The very basic outline of algorithm is given below: 1) first outer iteration is performed with "default" values of Lagrange multipliers Nu1/Nu2. Solution quality is low (candidate point can be too far away from true solution; large violation of constraints is possible) and is comparable with that of penalty methods. 2) subsequent outer iterations refine Lagrange multipliers and improve quality of the solution. INPUT PARAMETERS: State - structure which stores algorithm state Rho - penalty coefficient, Rho>0: * large enough that algorithm converges with desired precision. Minimum value is 10*max(S'*diag(H)*S), where S is a scale matrix (set by MinNLCSetScale) and H is a Hessian of the function being minimized. 
If you cannot easily estimate Hessian norm, see our recommendations below. * not TOO large to prevent ill-conditioning * for unit-scale problems (variables and Hessian have unit magnitude), Rho=100 or Rho=1000 can be used. * it is important to note that Rho is internally multiplied by scaling matrix, i.e. optimum value of Rho depends on scale of variables specified by MinNLCSetScale(). ItsCnt - number of outer iterations: * ItsCnt=0 means that small number of outer iterations is automatically chosen (10 iterations in current version). * ItsCnt=1 means that AUL algorithm performs just as usual barrier method. * ItsCnt>1 means that AUL algorithm performs specified number of outer iterations HOW TO CHOOSE PARAMETERS Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm is sometimes hard to tune. Good values of Rho and ItsCnt are problem-specific. In order to help you we prepared the following set of recommendations: * for unit-scale problems (variables and Hessian have unit magnitude), Rho=100 or Rho=1000 can be used. * start from some small value of Rho and solve problem with just one outer iteration (ItsCnt=1). In this case algorithm behaves like penalty method. Increase Rho in 2x or 10x steps until you see that one outer iteration returns point which is "rough approximation to solution". It is very important to have Rho so large that penalty term becomes constraining, i.e. modified function becomes highly convex in constrained directions. On the other hand, too large Rho may prevent you from converging to the solution. You can diagnose it by studying number of inner iterations performed by algorithm: too few (5-10 on 1000-dimensional problem) or too many (orders of magnitude more than dimensionality) usually means that Rho is too large. * with just one outer iteration you usually have low-quality solution. Some constraints can be violated with very large margin, while other ones (which are NOT violated in the true solution) can push final point too far in the inner area of the feasible set. For example, if you have constraint x0>=0 and true solution x0=1, then merely the presence of "x0>=0" will introduce a bias towards larger values of x0. Say, algorithm may stop at x0=1.5 instead of 1.0. * after you have found a good Rho, you may increase number of outer iterations. ItsCnt=10 is a good value. Subsequent outer iterations will refine values of Lagrange multipliers. Constraints which were violated will be enforced, inactive constraints will be dropped (corresponding multipliers will be decreased). Ideally, you should see 10-1000x improvement in constraint handling (constraint violation is reduced). * if you see that algorithm converges to vicinity of solution, but additional outer iterations do not refine solution, it may mean that algorithm is unstable - it wanders around true solution, but cannot approach it. Sometimes algorithm may be stabilized by increasing Rho one more time, making it 5x or 10x larger.
It is also possible to have constraints whose scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT the same as minimizing it subject to 1000000*x>=0. Because we know coefficients of boundary/linear constraints, we can automatically rescale and normalize them. However, there is no way to automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are black boxes. It means that YOU are the one who is responsible for correct scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale nonlinear constraints in such way that I-th component of dG/dX (or dH/dx) has magnitude approximately equal to 1/S[i] (where S is a scale set by MinNLCSetScale() function). WHAT IF IT DOES NOT CONVERGE? It is possible that AUL algorithm fails to converge to precise values of Lagrange multipliers. It stops somewhere around true solution, but candidate point is still too far from solution, and some constraints are violated. Such kind of failure is specific for Lagrangian algorithms - technically, they stop at some point, but this point is not a constrained solution. There exist several reasons why algorithm may fail to converge: a) too loose stopping criteria for inner iteration b) degenerate, redundant constraints c) target function has unconstrained extremum exactly at the boundary of some constraint d) numerical noise in the target function In all these cases algorithm is unstable - each outer iteration results in large and almost random step which improves handling of some constraints, but violates other ones (ideally outer iterations should form a sequence of progressively decreasing steps towards solution). The first possible reason is that too loose stopping criteria for inner iterations were specified. Augmented Lagrangian algorithm solves a sequence of intermediate problems, and requires each of them to be solved with high precision. Insufficient precision results in incorrect update of Lagrange multipliers. Another reason is that you may have specified degenerate constraints: say, some constraint was repeated twice. In most cases AUL algorithm gracefully handles such situations, but sometimes it may spend too much time figuring out subtle degeneracies in constraint matrix. The third reason is tricky and hard to diagnose. Consider situation when you minimize f=x^2 subject to constraint x>=0. Unconstrained extremum is located exactly at the boundary of constrained area. In this case algorithm will tend to oscillate between negative and positive x. Each time it stops at x<0 it "reinforces" constraint x>=0, and each time it is bounced to x>0 it "relaxes" constraint (and is attracted to x<0). Such situation sometimes happens in problems with hidden symmetries. The algorithm gets caught in a loop with Lagrange multipliers being continuously increased/decreased. Luckily, such loop forms after at least three iterations, so this problem can be solved by DECREASING number of outer iterations down to 1-2 and increasing penalty coefficient Rho as much as possible. The final reason is numerical noise. AUL algorithm is robust against moderate noise (more robust than, say, active set methods), but large noise may destabilize algorithm.
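A minimal end-to-end sketch (C++ interface wrapper; the callback name nlcjac and all numeric values are illustrative, and Rho/ItsCnt should be tuned as described above):

    alglib::real_1d_array x0 = "[0,0]", s = "[1,1]", x1;
    alglib::minnlcstate   state;
    alglib::minnlcreport  rep;
    alglib::minnlccreate(x0, state);
    alglib::minnlcsetalgoaul(state, 1000.0, 10);    // Rho=1000, 10 outer iterations
    alglib::minnlcsetscale(state, s);
    alglib::minnlcsetcond(state, 1.0E-9, 0);
    alglib::minnlcsetnlc(state, 0, 1);              // one nonlinear inequality H0(x)<=0
    alglib::minnlcoptimize(state, nlcjac);          // nlcjac fills fi[] and jac[][]
    alglib::minnlcresults(state, x1, rep);
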
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgoaul(minnlcstate* state, double rho, ae_int_t itscnt, ae_state *_state) { ae_assert(itscnt>=0, "MinNLCSetAlgoAUL: negative ItsCnt", _state); ae_assert(ae_isfinite(rho, _state), "MinNLCSetAlgoAUL: Rho is not finite", _state); ae_assert(ae_fp_greater(rho,(double)(0)), "MinNLCSetAlgoAUL: Rho<=0", _state); if( itscnt==0 ) { itscnt = 10; } state->aulitscnt = itscnt; state->rho = rho; state->solvertype = 0; } /************************************************************************* This function tells MinNLC optimizer to use SLP (Successive Linear Programming) algorithm for nonlinearly constrained optimization. This algorithm is a slight modification of one described in "A Linear programming-based optimization algorithm for solving nonlinear programming problems" (2010) by Claus Still and Tapio Westerlund. This solver is the slowest one in ALGLIB; it requires more target function evaluations than SQP and AUL. However, it is somewhat more robust in tricky cases, so it can be used as a backup plan. We recommend to use this algorithm when SQP/AUL do not work (do not return the solution you expect). If trying a different approach gives the same results, then MAYBE something is wrong with your optimization problem. Despite its name ("linear" = "first order method") this algorithm performs steps similar to those of the conjugate gradient method; internally it uses orthogonality/conjugacy requirement for subsequent steps which makes it closer to second order methods in terms of convergence speed. Convergence is proved for the following case: * function and constraints are continuously differentiable (C1 class) * extended Mangasarian–Fromovitz constraint qualification (EMFCQ) holds; in the context of this algorithm EMFCQ means that one can, for any infeasible point, find a search direction such that the constraint infeasibilities are reduced. This algorithm has following nice properties: * no parameters to tune * no convexity requirements for target function or constraints * initial point can be infeasible * algorithm respects box constraints in all intermediate points (it does not even evaluate function outside of box constrained area) * once linear constraints are enforced, algorithm will not violate them * no such guarantees can be provided for nonlinear constraints, but once nonlinear constraints are enforced, algorithm will try to respect them as much as possible * numerical differentiation does not violate box constraints (although general linear and nonlinear ones can be violated during differentiation) * from our experience, this algorithm is somewhat more robust in really difficult cases INPUT PARAMETERS: State - structure which stores algorithm state ===== TRACING SLP SOLVER ================================================= SLP solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'SLP' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. It also prints OptGuard integrity checker report when nonsmoothness of target/constraints is suspected. * 'SLP.DETAILED'- for output of points being visited and search directions This symbol also implicitly defines 'SLP'.
You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format * 'SLP.PROBING' - to let algorithm insert additional function evaluations before line search in order to build human-readable chart of the raw Lagrangian (~40 additional function evaluations is performed for each line search). This symbol also implicitly defines 'SLP'. * 'OPTGUARD' - for report of smoothness/continuity violations in target and/or constraints. This kind of reporting is included in 'SLP', but it comes with lots of additional info. If you need just smoothness monitoring, specify this setting. NOTE: this tag merely directs OptGuard output to log file. Even if you specify it, you still have to configure OptGuard by calling minnlcoptguard...() family of functions. By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. Specifying 'SLP.PROBING' adds even larger overhead due to additional function evaluations being performed. You may specify multiple symbols by separating them with commas: > > alglib::trace_file("SLP,SLP.PROBING,PREC.F6", "path/to/trace.log") > -- ALGLIB -- Copyright 02.04.2018 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgoslp(minnlcstate* state, ae_state *_state) { state->solvertype = 1; } /************************************************************************* This function tells MinNLC optimizer to use SQP (Successive Quadratic Programming) algorithm for nonlinearly constrained optimization. This algorithm needs order of magnitude (5x-10x) less function evaluations than AUL solver, but has higher overhead because each iteration involves solution of quadratic programming problem. Convergence is proved for the following case: * function and constraints are continuously differentiable (C1 class) This algorithm has following nice properties: * no parameters to tune * no convexity requirements for target function or constraints * initial point can be infeasible * algorithm respects box constraints in all intermediate points (it does not even evaluate function outside of box constrained area) * once linear constraints are enforced, algorithm will not violate them * no such guarantees can be provided for nonlinear constraints, but once nonlinear constraints are enforced, algorithm will try to respect them as much as possible * numerical differentiation does not violate box constraints (although general linear and nonlinear ones can be violated during differentiation) We recommend this algorithm as a default option for medium-scale problems (less than thousand of variables) or problems with target function being hard to evaluate. For large-scale problems or ones with very cheap target function AUL solver can be better option. INPUT PARAMETERS: State - structure which stores algorithm state ===== INTERACTION WITH OPTGUARD ========================================== OptGuard integrity checker allows us to catch problems like errors in gradients and discontinuity/nonsmoothness of the target/constraints. Latter kind of problems can be detected by looking upon line searches performed during optimization and searching for signs of nonsmoothness. The problem with SQP is that it is too good for OptGuard to work - it does not perform line searches. 
It typically needs just 1-2 function evaluations per step, which is not enough for OptGuard to detect nonsmoothness. So, if you suspect that your problem is nonsmooth, we recommend using the AUL or SLP solvers.
===== TRACING SQP SOLVER ================================================= SQP solver supports advanced tracing capabilities. You can trace algorithm output by specifying following trace symbols (case-insensitive) by means of trace_file() call: * 'SQP' - for basic trace of algorithm steps and decisions. Only short scalars (function values and deltas) are printed. N-dimensional quantities like search directions are NOT printed. It also prints OptGuard integrity checker report when nonsmoothness of target/constraints is suspected. * 'SQP.DETAILED'- for output of points being visited and search directions. This symbol also implicitly defines 'SQP'.
You can control output format by additionally specifying: * nothing to output in 6-digit exponential format * 'PREC.E15' to output in 15-digit exponential format * 'PREC.F6' to output in 6-digit fixed-point format * 'SQP.PROBING' - to let algorithm insert additional function evaluations before line search in order to build human-readable chart of the raw Lagrangian (~40 additional function evaluations are performed for each line search). This symbol also implicitly defines 'SQP'.
By default trace is disabled and adds no overhead to the optimization process. However, specifying any of the symbols adds some formatting and output-related overhead. Specifying 'SQP.PROBING' adds even larger overhead due to additional function evaluations being performed.
You may specify multiple symbols by separating them with commas:
>
> alglib::trace_file("SQP,SQP.PROBING,PREC.F6", "path/to/trace.log")
>
-- ALGLIB -- Copyright 02.12.2019 by Bochkanov Sergey *************************************************************************/ void minnlcsetalgosqp(minnlcstate* state, ae_state *_state) { state->solvertype = 2; }
/************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not
If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinNLCOptimize(). NOTE: algorithm passes two parameters to the rep() callback - the current point and the penalized function value at the current point. Important: the function value which is reported is NOT the function being minimized; it is the sum of the function being minimized and the penalty term.
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcsetxrep(minnlcstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; }
/************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied Jacobian, and one which uses only function vector and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object you should choose appropriate variant of MinNLCOptimize() - one which accepts function AND Jacobian or one which accepts ONLY function. Be careful to choose the variant of MinNLCOptimize() which corresponds to your optimization scheme!
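For example, the following sketch (illustrative only; it uses the C++ interface, nlc_fvec/nlc_jac stand for user-supplied callbacks, and x0 for a previously initialized real_1d_array) shows the two valid pairings; see also the table below:
> void nlc_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr);                     // values only
> void nlc_jac (const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr); // values + Jacobian
>
> alglib::minnlcstate s1, s2;
> alglib::minnlccreatef(x0, 0.0001, s1);   // numerical differentiation with step 0.0001
> alglib::minnlcoptimize(s1, nlc_fvec);    // works: function-only callback
>
> alglib::minnlccreate(x0, s2);            // analytic Jacobian will be required
> alglib::minnlcoptimize(s2, nlc_jac);     // works: function+Jacobian callback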
Table below lists different combinations of callback (function/gradient) passed to MinNLCOptimize() and specific function used to create optimizer. | USER PASSED TO MinNLCOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ MinNLCCreateF() | works FAILS MinNLCCreate() | FAILS works Here "FAILS" denotes inappropriate combinations of optimizer creation function and MinNLCOptimize() version. Attemps to use such combination will lead to exception. Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ ae_bool minnlciteration(minnlcstate* state, ae_state *_state) { ae_int_t i; ae_int_t k; ae_int_t n; ae_int_t ng; ae_int_t nh; double vleft; double vright; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { i = state->rstate.ia.ptr.p_int[0]; k = state->rstate.ia.ptr.p_int[1]; n = state->rstate.ia.ptr.p_int[2]; ng = state->rstate.ia.ptr.p_int[3]; nh = state->rstate.ia.ptr.p_int[4]; vleft = state->rstate.ra.ptr.p_double[0]; vright = state->rstate.ra.ptr.p_double[1]; } else { i = 359; k = -58; n = -919; ng = -909; nh = 81; vleft = 255; vright = 74; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } if( state->rstate.stage==15 ) { goto lbl_15; } if( state->rstate.stage==16 ) { goto lbl_16; } if( state->rstate.stage==17 ) { goto lbl_17; } if( state->rstate.stage==18 ) { goto lbl_18; } if( state->rstate.stage==19 ) { goto lbl_19; } if( state->rstate.stage==20 ) { goto lbl_20; } if( state->rstate.stage==21 ) { goto lbl_21; } if( state->rstate.stage==22 ) { goto lbl_22; } if( state->rstate.stage==23 ) { goto lbl_23; } if( state->rstate.stage==24 ) { goto lbl_24; } /* * Routine body */ /* * Init */ state->userterminationneeded = ae_false; state->repterminationtype = 0; state->repinneriterationscount = 0; state->repouteriterationscount = 0; state->repnfev = 0; state->repdbgphase0its = 0; state->repbcerr = (double)(0); state->repbcidx = -1; state->replcerr = (double)(0); state->replcidx = -1; state->repnlcerr = (double)(0); state->repnlcidx = -1; n = state->n; ng = state->ng; nh = state->nh; minnlc_clearrequestfields(state, _state); ae_assert(state->smoothnessguardlevel==0||state->smoothnessguardlevel==1, "MinNLCIteration: integrity check failed", _state); smoothnessmonitorinit(&state->smonitor, n, 1+ng+nh, state->smoothnessguardlevel>0, _state); for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; } /* * Check correctness 
of box constraints */ for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->repterminationtype = -3; state->repbcerr = state->bndl.ptr.p_double[i]-state->bndu.ptr.p_double[i]; state->repbcidx = i; result = ae_false; return result; } } } /* * Test gradient */ if( !(ae_fp_eq(state->diffstep,(double)(0))&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_25; } lbl_27: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xstart, &state->s, &state->bndl, &state->bndu, ae_true, state->teststep, _state) ) { goto lbl_28; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfij = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; for(i=0; i<=ng+nh; i++) { state->smonitor.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->smonitor.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]; } } goto lbl_27; lbl_28: lbl_25: /* * AUL solver */ if( state->solvertype!=0 ) { goto lbl_29; } if( ae_fp_neq(state->diffstep,(double)(0)) ) { rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->fbase, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm2, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp2, 1+ng+nh, _state); } ae_vector_set_length(&state->rstateaul.ia, 8+1, _state); ae_vector_set_length(&state->rstateaul.ra, 7+1, _state); state->rstateaul.stage = -1; lbl_31: if( !minnlc_auliteration(state, &state->smonitor, _state) ) { goto lbl_32; } /* * Numerical differentiation (if needed) - intercept NeedFiJ * request and replace it by sequence of NeedFi requests */ if( !(ae_fp_neq(state->diffstep,(double)(0))&&state->needfij) ) { goto lbl_33; } state->needfij = ae_false; state->needfi = ae_true; ae_v_move(&state->xbase.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); k = 0; lbl_35: if( k>n-1 ) { goto lbl_37; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: ae_v_move(&state->fm2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: ae_v_move(&state->fp2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = 
(8*(state->fp1.ptr.p_double[i]-state->fm1.ptr.p_double[i])-(state->fp2.ptr.p_double[i]-state->fm2.ptr.p_double[i]))/(6*state->diffstep*state->s.ptr.p_double[i]); } k = k+1; goto lbl_35; lbl_37: ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->rstate.stage = 5; goto lbl_rcomm; lbl_5: /* * Restore previous values of fields and continue */ state->needfi = ae_false; state->needfij = ae_true; goto lbl_31; lbl_33: /* * Forward request to caller */ state->rstate.stage = 6; goto lbl_rcomm; lbl_6: goto lbl_31; lbl_32: result = ae_false; return result; lbl_29: /* * SLP solver */ if( state->solvertype!=1 ) { goto lbl_38; } if( ae_fp_neq(state->diffstep,(double)(0)) ) { rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->fbase, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm2, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp2, 1+ng+nh, _state); } minslpinitbuf(&state->bndl, &state->bndu, &state->s, &state->xstart, n, &state->cleic, &state->lcsrcidx, state->nec, state->nic, state->ng, state->nh, state->epsx, state->maxits, &state->slpsolverstate, _state); lbl_40: if( !minslpiteration(&state->slpsolverstate, &state->smonitor, state->userterminationneeded, _state) ) { goto lbl_41; } /* * Forward request to caller */ if( !state->slpsolverstate.needfij ) { goto lbl_42; } /* * Evaluate target function/Jacobian */ if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_44; } /* * Analytic Jacobian is provided */ minnlc_unscale(state, &state->slpsolverstate.x, &state->slpsolverstate.scaledbndl, &state->slpsolverstate.scaledbndu, &state->x, _state); state->needfij = ae_true; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->needfij = ae_false; for(i=0; i<=ng+nh; i++) { state->slpsolverstate.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->slpsolverstate.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]*state->s.ptr.p_double[k]; } } goto lbl_45; lbl_44: /* * Numerical differentiation */ state->needfij = ae_false; state->needfi = ae_true; minnlc_unscale(state, &state->slpsolverstate.x, &state->slpsolverstate.scaledbndl, &state->slpsolverstate.scaledbndu, &state->xbase, _state); k = 0; lbl_46: if( k>n-1 ) { goto lbl_48; } vleft = state->xbase.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; vright = state->xbase.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; if( !((state->hasbndl.ptr.p_bool[k]&&ae_fp_less(vleft,state->bndl.ptr.p_double[k]))||(state->hasbndu.ptr.p_bool[k]&&ae_fp_greater(vright,state->bndu.ptr.p_double[k]))) ) { goto lbl_49; } /* * Box constraint is violated by 4-point centered formula, use 2-point uncentered one */ if( state->hasbndl.ptr.p_bool[k]&&ae_fp_less(vleft,state->bndl.ptr.p_double[k]) ) { vleft = state->bndl.ptr.p_double[k]; } if( state->hasbndu.ptr.p_bool[k]&&ae_fp_greater(vright,state->bndu.ptr.p_double[k]) ) { vright = state->bndu.ptr.p_double[k]; } ae_assert(ae_fp_less_eq(vleft,vright), "MinNLC: integrity check failed", _state); if( ae_fp_eq(vleft,vright) ) { /* * Fixed variable */ for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = (double)(0); } goto lbl_47; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = vleft; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); 
ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = vright; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = (state->fp1.ptr.p_double[i]-state->fm1.ptr.p_double[i])/(vright-vleft); } goto lbl_50; lbl_49: /* * 4-point centered formula does not violate box constraints */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: ae_v_move(&state->fm2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: ae_v_move(&state->fp2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = (8*(state->fp1.ptr.p_double[i]-state->fm1.ptr.p_double[i])-(state->fp2.ptr.p_double[i]-state->fm2.ptr.p_double[i]))/(6*state->diffstep*state->s.ptr.p_double[k]); } lbl_50: lbl_47: k = k+1; goto lbl_46; lbl_48: ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->needfi = ae_false; state->needfij = ae_true; for(i=0; i<=ng+nh; i++) { state->slpsolverstate.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->slpsolverstate.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]*state->s.ptr.p_double[k]; } } lbl_45: inc(&state->repnfev, _state); goto lbl_40; lbl_42: if( !state->slpsolverstate.xupdated ) { goto lbl_51; } /* * Report current point */ if( !state->xrep ) { goto lbl_53; } minnlc_unscale(state, &state->slpsolverstate.x, &state->slpsolverstate.scaledbndl, &state->slpsolverstate.scaledbndu, &state->x, _state); state->f = state->slpsolverstate.f; state->xupdated = ae_true; state->rstate.stage = 15; goto lbl_rcomm; lbl_15: state->xupdated = ae_false; lbl_53: goto lbl_40; lbl_51: ae_assert(state->slpsolverstate.needfij, "NLC:SLP:request", _state); goto lbl_40; lbl_41: state->repterminationtype = state->slpsolverstate.repterminationtype; state->repouteriterationscount = state->slpsolverstate.repouteriterationscount; state->repinneriterationscount = state->slpsolverstate.repinneriterationscount; state->repbcerr = state->slpsolverstate.repbcerr; state->repbcidx = state->slpsolverstate.repbcidx; state->replcerr = state->slpsolverstate.replcerr; state->replcidx = state->slpsolverstate.replcidx; state->repnlcerr = state->slpsolverstate.repnlcerr; 
state->repnlcidx = state->slpsolverstate.repnlcidx; minnlc_unscale(state, &state->slpsolverstate.stepkx, &state->slpsolverstate.scaledbndl, &state->slpsolverstate.scaledbndu, &state->xc, _state); result = ae_false; return result; lbl_38: /* * SQP solver */ if( state->solvertype!=2 ) { goto lbl_55; } if( ae_fp_neq(state->diffstep,(double)(0)) ) { rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->fbase, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm2, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fm1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp1, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp2, 1+ng+nh, _state); } minsqpinitbuf(&state->bndl, &state->bndu, &state->s, &state->xstart, n, &state->cleic, &state->lcsrcidx, state->nec, state->nic, state->ng, state->nh, state->epsx, state->maxits, &state->sqpsolverstate, _state); lbl_57: if( !minsqpiteration(&state->sqpsolverstate, &state->smonitor, state->userterminationneeded, _state) ) { goto lbl_58; } /* * Forward request to caller */ if( !state->sqpsolverstate.needfij ) { goto lbl_59; } /* * Evaluate target function/Jacobian */ if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_61; } /* * Analytic Jacobian is provided */ minnlc_unscale(state, &state->sqpsolverstate.x, &state->sqpsolverstate.scaledbndl, &state->sqpsolverstate.scaledbndu, &state->x, _state); state->needfij = ae_true; state->rstate.stage = 16; goto lbl_rcomm; lbl_16: state->needfij = ae_false; for(i=0; i<=ng+nh; i++) { state->sqpsolverstate.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->sqpsolverstate.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]*state->s.ptr.p_double[k]; } } goto lbl_62; lbl_61: /* * Numerical differentiation */ state->needfij = ae_false; state->needfi = ae_true; minnlc_unscale(state, &state->sqpsolverstate.x, &state->sqpsolverstate.scaledbndl, &state->sqpsolverstate.scaledbndu, &state->xbase, _state); k = 0; lbl_63: if( k>n-1 ) { goto lbl_65; } vleft = state->xbase.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; vright = state->xbase.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; if( !((state->hasbndl.ptr.p_bool[k]&&ae_fp_less(vleft,state->bndl.ptr.p_double[k]))||(state->hasbndu.ptr.p_bool[k]&&ae_fp_greater(vright,state->bndu.ptr.p_double[k]))) ) { goto lbl_66; } /* * Box constraint is violated by 4-point centered formula, use 2-point uncentered one */ if( state->hasbndl.ptr.p_bool[k]&&ae_fp_less(vleft,state->bndl.ptr.p_double[k]) ) { vleft = state->bndl.ptr.p_double[k]; } if( state->hasbndu.ptr.p_bool[k]&&ae_fp_greater(vright,state->bndu.ptr.p_double[k]) ) { vright = state->bndu.ptr.p_double[k]; } ae_assert(ae_fp_less_eq(vleft,vright), "MinNLC: integrity check failed", _state); if( ae_fp_eq(vleft,vright) ) { /* * Fixed variable */ for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = (double)(0); } goto lbl_64; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = vleft; state->rstate.stage = 17; goto lbl_rcomm; lbl_17: ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = vright; state->rstate.stage = 18; goto lbl_rcomm; lbl_18: ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = 
(state->fp1.ptr.p_double[i]-state->fm1.ptr.p_double[i])/(vright-vleft); } goto lbl_67; lbl_66: /* * 4-point centered formula does not violate box constraints */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 19; goto lbl_rcomm; lbl_19: ae_v_move(&state->fm2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 20; goto lbl_rcomm; lbl_20: ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+0.5*state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 21; goto lbl_rcomm; lbl_21: ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; state->rstate.stage = 22; goto lbl_rcomm; lbl_22: ae_v_move(&state->fp2.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = (8*(state->fp1.ptr.p_double[i]-state->fm1.ptr.p_double[i])-(state->fp2.ptr.p_double[i]-state->fm2.ptr.p_double[i]))/(6*state->diffstep*state->s.ptr.p_double[k]); } lbl_67: lbl_64: k = k+1; goto lbl_63; lbl_65: ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->rstate.stage = 23; goto lbl_rcomm; lbl_23: state->needfi = ae_false; state->needfij = ae_true; for(i=0; i<=ng+nh; i++) { state->sqpsolverstate.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->sqpsolverstate.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]*state->s.ptr.p_double[k]; } } lbl_62: inc(&state->repnfev, _state); goto lbl_57; lbl_59: if( !state->sqpsolverstate.xupdated ) { goto lbl_68; } /* * Report current point */ if( !state->xrep ) { goto lbl_70; } minnlc_unscale(state, &state->sqpsolverstate.x, &state->sqpsolverstate.scaledbndl, &state->sqpsolverstate.scaledbndu, &state->x, _state); state->f = state->sqpsolverstate.f; state->xupdated = ae_true; state->rstate.stage = 24; goto lbl_rcomm; lbl_24: state->xupdated = ae_false; lbl_70: goto lbl_57; lbl_68: ae_assert(state->sqpsolverstate.needfij, "NLC:SQP:request", _state); goto lbl_57; lbl_58: state->repterminationtype = state->sqpsolverstate.repterminationtype; state->repouteriterationscount = state->sqpsolverstate.repiterationscount; state->repinneriterationscount = state->sqpsolverstate.repiterationscount; state->repbcerr = state->sqpsolverstate.repbcerr; state->repbcidx = state->sqpsolverstate.repbcidx; state->replcerr = state->sqpsolverstate.replcerr; state->replcidx = state->sqpsolverstate.replcidx; state->repnlcerr = state->sqpsolverstate.repnlcerr; state->repnlcidx = state->sqpsolverstate.repnlcidx; minnlc_unscale(state, &state->sqpsolverstate.stepkx, &state->sqpsolverstate.scaledbndl, &state->sqpsolverstate.scaledbndu, &state->xc, _state); result = ae_false; return result; lbl_55: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; 
state->rstate.ia.ptr.p_int[0] = i; state->rstate.ia.ptr.p_int[1] = k; state->rstate.ia.ptr.p_int[2] = n; state->rstate.ia.ptr.p_int[3] = ng; state->rstate.ia.ptr.p_int[4] = nh; state->rstate.ra.ptr.p_double[0] = vleft; state->rstate.ra.ptr.p_double[1] = vright; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient/Jacobian. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function (constraints) at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient/Jacobian with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients/Jacobians, and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minnlcoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minnlcsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardgradient(minnlcstate* state, double teststep, ae_state *_state) { ae_assert(ae_isfinite(teststep, _state), "MinNLCOptGuardGradient: TestStep contains NaN or INF", _state); ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinNLCOptGuardGradient: invalid argument TestStep(TestStep<0)", _state); state->teststep = teststep; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. 
ones with: a) discontinuous target function (non-C0) and/or constraints b) nonsmooth target function (non-C1) and/or constraints
Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over.
Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems.
NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may go unnoticed (however, in most cases they can be spotted with a restart from a different initial point).
INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. This kind of monitoring does not work well with SQP because SQP solver needs just 1-2 function evaluations per step, which is not enough for OptGuard to make any conclusions.
=== EXPLANATION ========================================================== One major source of headache during optimization is the possibility of coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. a nonsmooth target. Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching the solution.
OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to catch errors. Upon discovering a suspicious pair of points it raises the appropriate flag (and allows you to continue optimization). When optimization is done, you can study the OptGuard result.
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardsmoothness(minnlcstate* state, ae_int_t level, ae_state *_state) { ae_assert(level==0||level==1, "MinNLCOptGuardSmoothness: unexpected value of level parameter", _state); state->smoothnessguardlevel = level; }
/************************************************************************* Results of the OptGuard integrity check; this function should be called after the optimization session is over.
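A typical calling sequence is sketched below (illustrative only; it uses the C++ interface, nlc_jac stands for a user-supplied callback and x0 for a previously initialized real_1d_array); the report fields themselves are described in the sections which follow:
> alglib::minnlcstate state;
> alglib::minnlcreport rep;
> alglib::optguardreport ogrep;
> alglib::real_1d_array xsol;
>
> alglib::minnlccreate(x0, state);
> alglib::minnlcoptguardgradient(state, 0.001);  // verify user-supplied Jacobian
> alglib::minnlcoptguardsmoothness(state);       // monitor C0/C1 continuity
> alglib::minnlcoptimize(state, nlc_jac);
> alglib::minnlcresults(state, xsol, rep);
> alglib::minnlcoptguardresults(state, ogrep);
> if( ogrep.badgradsuspected || ogrep.nonc0suspected || ogrep.nonc1suspected )
>     printf("OptGuard flagged a possible problem\n");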
=== PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * minnlcoptguardgradient() for gradient verification * minnlcoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradfidx for specific function (Jacobian row) suspected * rep.badgradvidx for specific variable (Jacobian column) suspected * rep.badgradxbase, a point where gradient/Jacobian is tested * rep.badgraduser, user-provided gradient/Jacobian * rep.badgradnum, reference gradient/Jacobian obtained via numerical differentiation * rep.nonc0suspected, and additionally: * rep.nonc0fidx - an index of specific function violating C0 continuity * rep.nonc1suspected, and additionally * rep.nonc1fidx - an index of specific function violating C1 continuity Here function index 0 means target function, index 1 or higher denotes nonlinear constraints. === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * minnlcoptguardnonc1test0results() * minnlcoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardresults(minnlcstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. 
This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardnonc1test0results(minnlcstate* state, optguardnonc1test0report* strrep, optguardnonc1test0report* lngrep, ae_state *_state) { _optguardnonc1test0report_clear(strrep); _optguardnonc1test0report_clear(lngrep); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). 
* fidx - is an index of the function (0 for target function, 1 or higher for nonlinear constraints) which is suspected of being "non-C1" * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minnlcoptguardnonc1test1results(minnlcstate* state, optguardnonc1test1report* strrep, optguardnonc1test1report* lngrep, ae_state *_state) { _optguardnonc1test1report_clear(strrep); _optguardnonc1test1report_clear(lngrep); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* MinNLC results: the solution found, completion codes and additional information. If you activated OptGuard integrity checking functionality and want to get OptGuard report, it can be retrieved with: * minnlcoptguardresults() - for a primary report about (a) suspected C0/C1 continuity violations and (b) errors in the analytic gradient. * minnlcoptguardnonc1test0results() - for C1 continuity violation test #0, detailed line search log * minnlcoptguardnonc1test1results() - for C1 continuity violation test #1, detailed line search log INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report, contains information about completion code, constraint violation at the solution and so on. You should check rep.terminationtype in order to distinguish successful termination from unsuccessful one: === FAILURE CODES === * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 box constraints are infeasible. Note: infeasibility of non-box constraints does NOT trigger emergency completion; you have to examine rep.bcerr/rep.lcerr/rep.nlcerr to detect possibly inconsistent constraints. === SUCCESS CODES === * 2 scaled step is no more than EpsX. * 5 MaxIts steps were taken. * 8 user requested algorithm termination via minnlcrequesttermination(), last accepted point is returned. More information about fields of this structure can be found in the comments on minnlcreport datatype. 
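For example, a minimal retrieval sketch (illustrative only; it assumes that minnlcoptimize() has already been called on state):
> alglib::real_1d_array xsol;
> alglib::minnlcreport rep;
> alglib::minnlcresults(state, xsol, rep);
> if( int(rep.terminationtype)>0 )
>     printf("solution: %s\n", xsol.tostring(6).c_str());            // success codes are positive
> else
>     printf("failure, terminationtype=%d\n", int(rep.terminationtype));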
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcresults(minnlcstate* state, /* Real */ ae_vector* x, minnlcreport* rep, ae_state *_state) { ae_vector_clear(x); _minnlcreport_clear(rep); minnlcresultsbuf(state, x, rep, _state); }
/************************************************************************* NLC results
Buffered implementation of MinNLCResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored.
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcresultsbuf(minnlcstate* state, /* Real */ ae_vector* x, minnlcreport* rep, ae_state *_state) { ae_int_t i; if( x->cnt<state->n ) { ae_vector_set_length(x, state->n, _state); } rep->iterationscount = state->repinneriterationscount; rep->nfev = state->repnfev; rep->terminationtype = state->repterminationtype; rep->bcerr = state->repbcerr; rep->bcidx = state->repbcidx; rep->lcerr = state->replcerr; rep->lcidx = state->replcidx; rep->nlcerr = state->repnlcerr; rep->nlcidx = state->repnlcidx; rep->dbgphase0its = state->repdbgphase0its; if( state->repterminationtype>0 ) { ae_v_move(&x->ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); } else { for(i=0; i<=state->n-1; i++) { x->ptr.p_double[i] = _state->v_nan; } } }
/************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination).
INPUT PARAMETERS: State - optimizer structure
NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored.
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minnlcrequesttermination(minnlcstate* state, ae_state *_state) { state->userterminationneeded = ae_true; }
/************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows one to solve multiple optimization problems (which must have the same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: State - structure previously allocated with MinNLCCreate call. X - new starting point.
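For example (illustrative sketch; x1 and x2 stand for two starting points of the same length, nlc_jac for a user-supplied callback):
> alglib::minnlccreate(x1, state);
> alglib::minnlcoptimize(state, nlc_jac);
> alglib::minnlcresults(state, xsol, rep);    // first problem solved
>
> alglib::minnlcrestartfrom(state, x2);       // reuse the already allocated structure
> alglib::minnlcoptimize(state, nlc_jac);
> alglib::minnlcresults(state, xsol, rep);    // second problem solved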
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnlcrestartfrom(minnlcstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; n = state->n; /* * First, check for errors in the inputs */ ae_assert(x->cnt>=n, "MinNLCRestartFrom: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinNLCRestartFrom: X contains infinite or NaN values!", _state); ae_v_move(&state->xstart.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * prepare RComm facilities */ ae_vector_set_length(&state->rstate.ia, 4+1, _state); ae_vector_set_length(&state->rstate.ra, 1+1, _state); state->rstate.stage = -1; minnlc_clearrequestfields(state, _state); }
/************************************************************************* Penalty function for equality constraints. INPUT PARAMETERS: Alpha - function argument. Penalty function becomes large when Alpha approaches -1 or +1. It is defined for Alpha<=-1 or Alpha>=+1 - in this case infinite value is returned. OUTPUT PARAMETERS: F - depending on Alpha: * for Alpha in (-1+eps,+1-eps), F=F(Alpha) * for Alpha outside of interval, F is some very large number DF - depending on Alpha: * for Alpha in (-1+eps,+1-eps), DF=dF(Alpha)/dAlpha, exact numerical derivative. * otherwise, it is zero D2F - second derivative
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcequalitypenaltyfunction(double alpha, double* f, double* df, double* d2f, ae_state *_state) { *f = 0; *df = 0; *d2f = 0; *f = 0.5*alpha*alpha; *df = alpha; *d2f = 1.0; }
/************************************************************************* "Penalty" function for inequality constraints, which is multiplied by penalty coefficient Rho. "Penalty" function plays only supplementary role - it helps to stabilize algorithm when solving non-convex problems. Because it is multiplied by fixed and large Rho - not Lagrange multiplier Nu which may become arbitrarily small! - it enforces convexity of the problem behind the boundary of the feasible area. This function is zero at the feasible area and in the close neighborhood; it becomes non-zero only at some distance (scaling is essential!) and grows quadratically.
Penalty function must enter augmented Lagrangian as Rho*PENALTY(x-lowerbound) with corresponding changes being made for upper bound or other kinds of constraints.
INPUT PARAMETERS: Alpha - function argument. Typically, if we have active constraint with precise Lagrange multiplier, we have Alpha around 1. Large positive Alpha's correspond to inner area of the feasible set. Alpha<1 corresponds to outer area of the feasible set. StabilizingPoint- point where F becomes non-zero. Must be negative value, at least -1, large values (hundreds) are possible. OUTPUT PARAMETERS: F - F(Alpha) DF - DF=dF(Alpha)/dAlpha, exact derivative D2F - second derivative
NOTE: it is important to have significantly non-zero StabilizingPoint, because when it is large, shift term does not interfere with Lagrange multipliers converging to their final values. Thus, convergence of such modified AUL algorithm is still guaranteed by same set of theorems.
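For illustration (values computed directly from the implementation below, using StabilizingPoint=-2, which is the default value stored by this unit in state->stabilizingpoint):
> Alpha = +1.0  =>  F = 0.0,  DF =  0.0,  D2F = 0.0   (well inside the stabilized region, no penalty)
> Alpha = -2.0  =>  F = 0.0,  DF =  0.0,  D2F = 0.0   (branch point, penalty just switches on)
> Alpha = -3.0  =>  F = 0.5,  DF = -1.0,  D2F = 1.0   (quadratic growth past the branch point)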
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcinequalitypenaltyfunction(double alpha, double stabilizingpoint, double* f, double* df, double* d2f, ae_state *_state) { *f = 0; *df = 0; *d2f = 0; if( ae_fp_greater_eq(alpha,stabilizingpoint) ) { *f = 0.0; *df = 0.0; *d2f = 0.0; } else { alpha = alpha-stabilizingpoint; *f = 0.5*alpha*alpha; *df = alpha; *d2f = 1.0; } }
/************************************************************************* "Shift" function for inequality constraints, which is multiplied by corresponding Lagrange multiplier. "Shift" function is the main factor which enforces inequality constraints. Inequality penalty function plays only supplementary role - it prevents accidental steps deep into the infeasible area when working with non-convex problems (read comments on corresponding function for more information).
Shift function must enter augmented Lagrangian as Nu/Rho*SHIFT((x-lowerbound)*Rho+1) with corresponding changes being made for upper bound or other kinds of constraints.
INPUT PARAMETERS: Alpha - function argument. Typically, if we have active constraint with precise Lagrange multiplier, we have Alpha around 1. Large positive Alpha's correspond to inner area of the feasible set. Alpha<1 corresponds to outer area of the feasible set. OUTPUT PARAMETERS: F - F(Alpha) DF - DF=dF(Alpha)/dAlpha, exact derivative D2F - second derivative
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ void minnlcinequalityshiftfunction(double alpha, double* f, double* df, double* d2f, ae_state *_state) { *f = 0; *df = 0; *d2f = 0; if( ae_fp_greater_eq(alpha,0.5) ) { *f = -ae_log(alpha, _state); *df = -1/alpha; *d2f = 1/(alpha*alpha); } else { *f = 2*alpha*alpha-4*alpha+(ae_log((double)(2), _state)+1.5); *df = 4*alpha-4; *d2f = (double)(4); } }
/************************************************************************* Clears request fields (to be sure that we don't forget to clear something) *************************************************************************/ static void minnlc_clearrequestfields(minnlcstate* state, ae_state *_state) { state->needfi = ae_false; state->needfij = ae_false; state->xupdated = ae_false; }
/************************************************************************* Internal initialization subroutine. Sets default NLC solver with default criteria.
*************************************************************************/ static void minnlc_minnlcinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnlcstate* state, ae_state *_state) { ae_frame _frame_block; ae_int_t i; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); /* * Default params */ state->stabilizingpoint = -2.0; state->initialinequalitymultiplier = 1.0; /* * Smoothness monitor, default init */ state->teststep = (double)(0); state->smoothnessguardlevel = 0; smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state); /* * Initialize other params */ state->n = n; state->diffstep = diffstep; state->userterminationneeded = ae_false; ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->hasbndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->hasbndu, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->lastscaleused, n, _state); ae_vector_set_length(&state->xstart, n, _state); ae_vector_set_length(&state->xc, n, _state); ae_vector_set_length(&state->x, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = _state->v_neginf; state->hasbndl.ptr.p_bool[i] = ae_false; state->bndu.ptr.p_double[i] = _state->v_posinf; state->hasbndu.ptr.p_bool[i] = ae_false; state->s.ptr.p_double[i] = 1.0; state->lastscaleused.ptr.p_double[i] = 1.0; state->xstart.ptr.p_double[i] = x->ptr.p_double[i]; state->xc.ptr.p_double[i] = x->ptr.p_double[i]; } minnlcsetlc(state, &c, &ct, 0, _state); minnlcsetnlc(state, 0, 0, _state); minnlcsetcond(state, 0.0, 0, _state); minnlcsetxrep(state, ae_false, _state); minnlcsetalgosqp(state, _state); minnlcsetprecexactrobust(state, 0, _state); minnlcsetstpmax(state, 0.0, _state); minlbfgscreate(n, ae_minint(minnlc_lbfgsfactor, n, _state), x, &state->auloptimizer, _state); minnlcrestartfrom(state, x, _state); ae_frame_leave(_state); } /************************************************************************* This function clears preconditioner for L-BFGS optimizer (sets it do default state); Parameters: AULOptimizer - optimizer to tune -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static void minnlc_clearpreconditioner(minlbfgsstate* auloptimizer, ae_state *_state) { minlbfgssetprecdefault(auloptimizer, _state); } /************************************************************************* This function updates preconditioner for L-BFGS optimizer. Parameters: PrecType - preconditioner type: * 0 for unpreconditioned iterations * 1 for inexact LBFGS * 2 for exact low rank preconditioner update after each UpdateFreq its * 3 for exact robust preconditioner update after each UpdateFreq its UpdateFreq - update frequency PrecCounter - iterations counter, must be zero on the first call, automatically increased by this function. This counter is used to implement "update-once-in-X-iterations" scheme. AULOptimizer - optimizer to tune X - current point Rho - penalty term GammaK - current estimate of Hessian norm (used for initialization of preconditioner). Can be zero, in which case Hessian is assumed to be unit. 
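Roughly speaking, in all three preconditioning modes the curvature model assembled by this function has the same structure
>     H ~ diag(D) + W'*diag(C)*W   (plus a small diagonal regularizer, see the code below)
where D (BufD) holds diagonal curvature coming from GammaK and from the box constraints, C (BufC) holds per-constraint curvature coefficients, and the rows of W (BufW) hold gradients of the linear and nonlinear constraints. The modes differ only in how this model is handed to the L-BFGS optimizer: as an inexact rank-K update (PrecType=1), as an exact low-rank update (PrecType=2), or as an explicitly assembled and Cholesky-factored dense matrix (PrecType=3).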
-- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static void minnlc_updatepreconditioner(ae_int_t prectype, ae_int_t updatefreq, ae_int_t* preccounter, minlbfgsstate* auloptimizer, /* Real */ ae_vector* x, double rho, double gammak, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* hasbndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* nubc, /* Real */ ae_matrix* cleic, /* Real */ ae_vector* nulc, /* Real */ ae_vector* fi, /* Real */ ae_matrix* jac, /* Real */ ae_vector* nunlc, /* Real */ ae_vector* bufd, /* Real */ ae_vector* bufc, /* Real */ ae_matrix* bufw, /* Real */ ae_matrix* bufz, /* Real */ ae_vector* tmp0, ae_int_t n, ae_int_t nec, ae_int_t nic, ae_int_t ng, ae_int_t nh, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; double v; double p; double dp; double d2p; ae_bool bflag; ae_assert(ae_fp_greater(rho,(double)(0)), "MinNLC: integrity check failed", _state); rvectorsetlengthatleast(bufd, n, _state); rvectorsetlengthatleast(bufc, nec+nic+ng+nh, _state); rmatrixsetlengthatleast(bufw, nec+nic+ng+nh, n, _state); rvectorsetlengthatleast(tmp0, n, _state);
/* * Preconditioner before update from barrier/penalty functions */ if( ae_fp_eq(gammak,(double)(0)) ) { gammak = (double)(1); } for(i=0; i<=n-1; i++) { bufd->ptr.p_double[i] = gammak; }
/* * Update diagonal Hessian using nonlinearity from boundary constraints: * * penalty term from equality constraints * * shift term from inequality constraints * * NOTE: penalty term for inequality constraints is ignored because it * is large only in exceptional cases. */ for(i=0; i<=n-1; i++) { if( (hasbndl->ptr.p_bool[i]&&hasbndu->ptr.p_bool[i])&&ae_fp_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { minnlcequalitypenaltyfunction((x->ptr.p_double[i]-bndl->ptr.p_double[i])*rho, &p, &dp, &d2p, _state); bufd->ptr.p_double[i] = bufd->ptr.p_double[i]+d2p*rho; continue; } if( hasbndl->ptr.p_bool[i] ) { minnlcinequalityshiftfunction((x->ptr.p_double[i]-bndl->ptr.p_double[i])*rho+1, &p, &dp, &d2p, _state); bufd->ptr.p_double[i] = bufd->ptr.p_double[i]+nubc->ptr.p_double[2*i+0]*d2p*rho; } if( hasbndu->ptr.p_bool[i] ) { minnlcinequalityshiftfunction((bndu->ptr.p_double[i]-x->ptr.p_double[i])*rho+1, &p, &dp, &d2p, _state); bufd->ptr.p_double[i] = bufd->ptr.p_double[i]+nubc->ptr.p_double[2*i+1]*d2p*rho; } }
/* * Process linear constraints */ for(i=0; i<=nec+nic-1; i++) { ae_v_move(&bufw->ptr.pp_double[i][0], 1, &cleic->ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); v = ae_v_dotproduct(&cleic->ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); v = v-cleic->ptr.pp_double[i][n]; if( i<nec ) { /* * Equality constraint */ minnlcequalitypenaltyfunction(v*rho, &p, &dp, &d2p, _state); bufc->ptr.p_double[i] = d2p*rho; } else { /* * Inequality constraint */ minnlcinequalityshiftfunction(-v*rho+1, &p, &dp, &d2p, _state); bufc->ptr.p_double[i] = nulc->ptr.p_double[i]*d2p*rho; } }
/* * Process nonlinear constraints */ for(i=0; i<=ng+nh-1; i++) { ae_v_move(&bufw->ptr.pp_double[nec+nic+i][0], 1, &jac->ptr.pp_double[1+i][0], 1, ae_v_len(0,n-1)); v = fi->ptr.p_double[1+i]; if( i<ng ) { /* * Equality constraint */ minnlcequalitypenaltyfunction(v*rho, &p, &dp, &d2p, _state); bufc->ptr.p_double[nec+nic+i] = d2p*rho; } else { /* * Inequality constraint */ minnlcinequalityshiftfunction(-v*rho+1, &p, &dp, &d2p, _state); bufc->ptr.p_double[nec+nic+i] = nunlc->ptr.p_double[i]*d2p*rho; } }
/* * Add regularizer (large Rho often results in nearly-degenerate matrices; * sometimes Cholesky decomposition fails without regularization). * * We use RegPrec*diag(W'*W) as preconditioner.
*/ k = nec+nic+ng+nh; for(j=0; j<=n-1; j++) { tmp0->ptr.p_double[j] = 0.0; } for(i=0; i<=k-1; i++) { v = bufc->ptr.p_double[i]; for(j=0; j<=n-1; j++) { tmp0->ptr.p_double[j] = tmp0->ptr.p_double[j]+v*bufw->ptr.pp_double[i][j]*bufw->ptr.pp_double[i][j]; } } for(j=0; j<=n-1; j++) { bufd->ptr.p_double[j] = bufd->ptr.p_double[j]+minnlc_regprec*tmp0->ptr.p_double[j]; } /* * Apply preconditioner */ if( prectype==1 ) { minlbfgssetprecrankklbfgsfast(auloptimizer, bufd, bufc, bufw, nec+nic+ng+nh, _state); } if( prectype==2&&*preccounter%updatefreq==0 ) { minlbfgssetpreclowrankexact(auloptimizer, bufd, bufc, bufw, nec+nic+ng+nh, _state); } if( prectype==3&&*preccounter%updatefreq==0 ) { /* * Generate full NxN dense Hessian */ rmatrixsetlengthatleast(bufz, n, n, _state); for(i=0; i<=n-1; i++) { for(j=0; j<=n-1; j++) { bufz->ptr.pp_double[i][j] = (double)(0); } bufz->ptr.pp_double[i][i] = bufd->ptr.p_double[i]; } if( nec+nic+ng+nh>0 ) { for(i=0; i<=nec+nic+ng+nh-1; i++) { ae_assert(ae_fp_greater_eq(bufc->ptr.p_double[i],(double)(0)), "MinNLC: updatepreconditioner() integrity failure", _state); v = ae_sqrt(bufc->ptr.p_double[i], _state); for(j=0; j<=n-1; j++) { bufw->ptr.pp_double[i][j] = bufw->ptr.pp_double[i][j]*v; } } rmatrixsyrk(n, nec+nic+ng+nh, 1.0, bufw, 0, 0, 2, 1.0, bufz, 0, 0, ae_true, _state); } /* * Evaluate Cholesky decomposition, set preconditioner */ bflag = spdmatrixcholeskyrec(bufz, 0, n, ae_true, bufd, _state); ae_assert(bflag, "MinNLC: updatepreconditioner() failure, Cholesky failed", _state); minlbfgssetpreccholesky(auloptimizer, bufz, ae_true, _state); } inc(preccounter, _state); } /************************************************************************* This subroutine adds penalty from boundary constraints to target function and its gradient. Penalty function is one which is used for main AUL cycle - with Lagrange multipliers and infinite at the barrier and beyond. 
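For background, the classical augmented-Lagrangian treatment of a single inequality constraint
g(x)<=0 with multiplier mu and penalty coefficient rho adds the term sketched below to the
objective. This is only a textbook sketch for intuition; MinNLC uses its own shifted-barrier
functions (minnlcequalitypenaltyfunction / minnlcinequalityshiftfunction) rather than this
exact formula:

    #include <math.h>

    // phi(g) = ( max(0, mu + rho*g)^2 - mu^2 ) / (2*rho); dphi/dg is returned too
    static double aul_inequality_term(double g, double mu, double rho,
                                      double *dphi_dg)
    {
        double t = mu + rho*g;
        if( t>0.0 )
        {
            *dphi_dg = t;
            return (t*t-mu*mu)/(2.0*rho);
        }
        *dphi_dg = 0.0;
        return -mu*mu/(2.0*rho);
    }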
Parameters: X[] - current point BndL[], BndU[] - boundary constraints HasBndL[], HasBndU[] - I-th element is True if corresponding constraint is present NuBC[] - Lagrange multipliers corresponding to constraints Rho - penalty term StabilizingPoint - branch point for inequality stabilizing term F - function value to modify G - gradient to modify -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static void minnlc_penaltybc(/* Real */ ae_vector* x, /* Real */ ae_vector* bndl, /* Boolean */ ae_vector* hasbndl, /* Real */ ae_vector* bndu, /* Boolean */ ae_vector* hasbndu, /* Real */ ae_vector* nubc, ae_int_t n, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t i; double p; double dp; double d2p; for(i=0; i<=n-1; i++) { if( (hasbndl->ptr.p_bool[i]&&hasbndu->ptr.p_bool[i])&&ae_fp_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]) ) { /* * I-th boundary constraint is of equality-type */ minnlcequalitypenaltyfunction((x->ptr.p_double[i]-bndl->ptr.p_double[i])*rho, &p, &dp, &d2p, _state); *f = *f+p/rho-nubc->ptr.p_double[2*i+0]*(x->ptr.p_double[i]-bndl->ptr.p_double[i]); g->ptr.p_double[i] = g->ptr.p_double[i]+dp-nubc->ptr.p_double[2*i+0]; continue; } if( hasbndl->ptr.p_bool[i] ) { /* * Handle lower bound */ minnlcinequalitypenaltyfunction(x->ptr.p_double[i]-bndl->ptr.p_double[i], stabilizingpoint, &p, &dp, &d2p, _state); *f = *f+rho*p; g->ptr.p_double[i] = g->ptr.p_double[i]+rho*dp; minnlcinequalityshiftfunction((x->ptr.p_double[i]-bndl->ptr.p_double[i])*rho+1, &p, &dp, &d2p, _state); *f = *f+p/rho*nubc->ptr.p_double[2*i+0]; g->ptr.p_double[i] = g->ptr.p_double[i]+dp*nubc->ptr.p_double[2*i+0]; } if( hasbndu->ptr.p_bool[i] ) { /* * Handle upper bound */ minnlcinequalitypenaltyfunction(bndu->ptr.p_double[i]-x->ptr.p_double[i], stabilizingpoint, &p, &dp, &d2p, _state); *f = *f+rho*p; g->ptr.p_double[i] = g->ptr.p_double[i]-rho*dp; minnlcinequalityshiftfunction((bndu->ptr.p_double[i]-x->ptr.p_double[i])*rho+1, &p, &dp, &d2p, _state); *f = *f+p/rho*nubc->ptr.p_double[2*i+1]; g->ptr.p_double[i] = g->ptr.p_double[i]-dp*nubc->ptr.p_double[2*i+1]; } } } /************************************************************************* This subroutine adds penalty from linear constraints to target function and its gradient. Penalty function is one which is used for main AUL cycle - with Lagrange multipliers and infinite at the barrier and beyond. Parameters: X[] - current point CLEIC[] - constraints matrix, first NEC rows are equality ones, next NIC rows are inequality ones. array[NEC+NIC,N+1] NuLC[] - Lagrange multipliers corresponding to constraints, array[NEC+NIC] N - dimensionalty NEC - number of equality constraints NIC - number of inequality constraints. 
Rho - penalty term StabilizingPoint - branch point for inequality stabilizing term F - function value to modify G - gradient to modify -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static void minnlc_penaltylc(/* Real */ ae_vector* x, /* Real */ ae_matrix* cleic, /* Real */ ae_vector* nulc, ae_int_t n, ae_int_t nec, ae_int_t nic, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t i; double v; double p; double dp; double d2p; double fupd; double gupd; for(i=0; i<=nec+nic-1; i++) { v = ae_v_dotproduct(&cleic->ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); v = v-cleic->ptr.pp_double[i][n]; fupd = (double)(0); gupd = (double)(0); if( i<nec ) { /* * Equality constraint */ minnlcequalitypenaltyfunction(v*rho, &p, &dp, &d2p, _state); fupd = fupd+p/rho; gupd = gupd+dp; fupd = fupd-nulc->ptr.p_double[i]*v; gupd = gupd-nulc->ptr.p_double[i]; } else { /* * Inequality constraint */ minnlcinequalitypenaltyfunction(-v, stabilizingpoint, &p, &dp, &d2p, _state); fupd = fupd+p*rho; gupd = gupd-dp*rho; minnlcinequalityshiftfunction(-v*rho+1, &p, &dp, &d2p, _state); fupd = fupd+p/rho*nulc->ptr.p_double[i]; gupd = gupd-dp*nulc->ptr.p_double[i]; } *f = *f+fupd; ae_v_addd(&g->ptr.p_double[0], 1, &cleic->ptr.pp_double[i][0], 1, ae_v_len(0,n-1), gupd); } } /************************************************************************* This subroutine adds penalty from nonlinear constraints to target function and its gradient. Penalty function is one which is used for main AUL cycle - with Lagrange multipliers and infinite at the barrier and beyond. Parameters: Fi[] - function vector: * 1 component for function being minimized * NG components for equality constraints G_i(x)=0 * NH components for inequality constraints H_i(x)<=0 J[] - Jacobian matrix, array[1+NG+NH,N] NuNLC[] - Lagrange multipliers corresponding to constraints, array[NG+NH] N - number of dimensions NG - number of equality constraints NH - number of inequality constraints Rho - penalty term StabilizingPoint - branch point for inequality stabilizing term F - function value to modify G - gradient to modify -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static void minnlc_penaltynlc(/* Real */ ae_vector* fi, /* Real */ ae_matrix* j, /* Real */ ae_vector* nunlc, ae_int_t n, ae_int_t ng, ae_int_t nh, double rho, double stabilizingpoint, double* f, /* Real */ ae_vector* g, ae_state *_state) { ae_int_t i; double v; double p; double dp; double d2p; double fupd; double gupd; /* * IMPORTANT: loop starts from 1, not zero! */ for(i=1; i<=ng+nh; i++) { v = fi->ptr.p_double[i]; fupd = (double)(0); gupd = (double)(0); if( i<=ng ) { /* * Equality constraint */ minnlcequalitypenaltyfunction(v*rho, &p, &dp, &d2p, _state); fupd = fupd+p/rho; gupd = gupd+dp; fupd = fupd-nunlc->ptr.p_double[i-1]*v; gupd = gupd-nunlc->ptr.p_double[i-1]; } else { /* * Inequality constraint */ minnlcinequalitypenaltyfunction(-v, stabilizingpoint, &p, &dp, &d2p, _state); fupd = fupd+p*rho; gupd = gupd-dp*rho; minnlcinequalityshiftfunction(-v*rho+1, &p, &dp, &d2p, _state); fupd = fupd+p/rho*nunlc->ptr.p_double[i-1]; gupd = gupd-dp*nunlc->ptr.p_double[i-1]; } *f = *f+fupd; ae_v_addd(&g->ptr.p_double[0], 1, &j->ptr.pp_double[i][0], 1, ae_v_len(0,n-1), gupd); } } /************************************************************************* This function performs actual processing for AUL algorithm.
It expects that caller redirects its reverse communication requests NeedFiJ/XUpdated to external user who will provide analytic derivative (or handle reports about progress). In case external user does not have analytic derivative, it is responsibility of caller to intercept NeedFiJ request and replace it with appropriate numerical differentiation scheme. -- ALGLIB -- Copyright 06.06.2014 by Bochkanov Sergey *************************************************************************/ static ae_bool minnlc_auliteration(minnlcstate* state, smoothnessmonitor* smonitor, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t ng; ae_int_t nh; ae_int_t i; ae_int_t j; ae_int_t outerit; ae_int_t preccounter; double v; double vv; double p; double dp; double d2p; double v0; double v1; double v2; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstateaul.stage>=0 ) { n = state->rstateaul.ia.ptr.p_int[0]; nec = state->rstateaul.ia.ptr.p_int[1]; nic = state->rstateaul.ia.ptr.p_int[2]; ng = state->rstateaul.ia.ptr.p_int[3]; nh = state->rstateaul.ia.ptr.p_int[4]; i = state->rstateaul.ia.ptr.p_int[5]; j = state->rstateaul.ia.ptr.p_int[6]; outerit = state->rstateaul.ia.ptr.p_int[7]; preccounter = state->rstateaul.ia.ptr.p_int[8]; v = state->rstateaul.ra.ptr.p_double[0]; vv = state->rstateaul.ra.ptr.p_double[1]; p = state->rstateaul.ra.ptr.p_double[2]; dp = state->rstateaul.ra.ptr.p_double[3]; d2p = state->rstateaul.ra.ptr.p_double[4]; v0 = state->rstateaul.ra.ptr.p_double[5]; v1 = state->rstateaul.ra.ptr.p_double[6]; v2 = state->rstateaul.ra.ptr.p_double[7]; } else { n = -788; nec = 809; nic = 205; ng = -838; nh = 939; i = -526; j = 763; outerit = -541; preccounter = -698; v = -900; vv = -318; p = -940; dp = 1016; d2p = -229; v0 = -536; v1 = 487; v2 = -115; } if( state->rstateaul.stage==0 ) { goto lbl_0; } if( state->rstateaul.stage==1 ) { goto lbl_1; } if( state->rstateaul.stage==2 ) { goto lbl_2; } /* * Routine body */ ae_assert(state->solvertype==0, "MinNLC: internal error", _state); n = state->n; nec = state->nec; nic = state->nic; ng = state->ng; nh = state->nh; /* * Prepare scaled problem */ rvectorsetlengthatleast(&state->scaledbndl, n, _state); rvectorsetlengthatleast(&state->scaledbndu, n, _state); rmatrixsetlengthatleast(&state->scaledcleic, nec+nic, n+1, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i] ) { state->scaledbndl.ptr.p_double[i] = state->bndl.ptr.p_double[i]/state->s.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i] ) { state->scaledbndu.ptr.p_double[i] = state->bndu.ptr.p_double[i]/state->s.ptr.p_double[i]; } state->xc.ptr.p_double[i] = state->xstart.ptr.p_double[i]/state->s.ptr.p_double[i]; } for(i=0; i<=nec+nic-1; i++) { /* * Scale and normalize linear constraints */ vv = 0.0; for(j=0; j<=n-1; j++) { v = state->cleic.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; state->scaledcleic.ptr.pp_double[i][j] = v; vv = vv+v*v; } vv = ae_sqrt(vv, _state); state->scaledcleic.ptr.pp_double[i][n] = state->cleic.ptr.pp_double[i][n]; if( ae_fp_greater(vv,(double)(0)) ) { for(j=0; j<=n; j++) { state->scaledcleic.ptr.pp_double[i][j] = state->scaledcleic.ptr.pp_double[i][j]/vv; } } } /* * Prepare stopping criteria */ minlbfgssetcond(&state->auloptimizer, (double)(0), (double)(0), state->epsx, 
state->maxits, _state); minlbfgssetstpmax(&state->auloptimizer, state->stpmax, _state); /* * Main AUL cycle: * * prepare Lagrange multipliers NuNB/NuLC * * set GammaK (current estimate of Hessian norm) to InitGamma and XKPresent to False */ rvectorsetlengthatleast(&state->nubc, 2*n, _state); rvectorsetlengthatleast(&state->nulc, nec+nic, _state); rvectorsetlengthatleast(&state->nunlc, ng+nh, _state); rvectorsetlengthatleast(&state->xk, n, _state); rvectorsetlengthatleast(&state->gk, n, _state); rvectorsetlengthatleast(&state->xk1, n, _state); rvectorsetlengthatleast(&state->gk1, n, _state); for(i=0; i<=n-1; i++) { state->nubc.ptr.p_double[2*i+0] = 0.0; state->nubc.ptr.p_double[2*i+1] = 0.0; if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { continue; } if( state->hasbndl.ptr.p_bool[i] ) { state->nubc.ptr.p_double[2*i+0] = state->initialinequalitymultiplier; } if( state->hasbndu.ptr.p_bool[i] ) { state->nubc.ptr.p_double[2*i+1] = state->initialinequalitymultiplier; } } for(i=0; i<=nec-1; i++) { state->nulc.ptr.p_double[i] = 0.0; } for(i=0; i<=nic-1; i++) { state->nulc.ptr.p_double[nec+i] = state->initialinequalitymultiplier; } for(i=0; i<=ng-1; i++) { state->nunlc.ptr.p_double[i] = 0.0; } for(i=0; i<=nh-1; i++) { state->nunlc.ptr.p_double[ng+i] = state->initialinequalitymultiplier; } state->gammak = minnlc_initgamma; state->xkpresent = ae_false; ae_assert(state->aulitscnt>0, "MinNLC: integrity check failed", _state); minnlc_clearpreconditioner(&state->auloptimizer, _state); outerit = 0; lbl_3: if( outerit>state->aulitscnt-1 ) { goto lbl_5; } /* * Optimize with current Lagrange multipliers * * NOTE: this code expects and checks that line search ends in the * point which is used as beginning for the next search. Such * guarantee is given by MCSRCH function. L-BFGS optimizer * does not formally guarantee it, but it follows same rule. * Below we a) rely on such property of the optimizer, and b) * assert that it is true, in order to fail loudly if it is * not true. * * NOTE: security check for NAN/INF in F/G is responsibility of * LBFGS optimizer. AUL optimizer checks for NAN/INF only * when we update Lagrange multipliers. 
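The NAN/INF check performed at multiplier-update time reduces the whole F/J set to a
   single scalar whose finiteness implies finiteness of every component. A minimal
   standalone version of that reduction (illustrative helper, not an ALGLIB function):

       #include <math.h>

       static int all_finite(const double *a, int n)
       {
           double v = 0.0;
           for(int i=0; i<n; i++)
               v = 0.1*v + a[i];   // any NAN/INF summand poisons v
           return isfinite(v);     // nonzero only if every a[i] was finite
       }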
*/ preccounter = 0; minlbfgssetxrep(&state->auloptimizer, ae_true, _state); minlbfgsrestartfrom(&state->auloptimizer, &state->xc, _state); lbl_6: if( !minlbfgsiteration(&state->auloptimizer, _state) ) { goto lbl_7; } if( !state->auloptimizer.needfg ) { goto lbl_8; } /* * Un-scale X, evaluate F/G/H, re-scale Jacobian */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->auloptimizer.x.ptr.p_double[i]*state->s.ptr.p_double[i]; } state->needfij = ae_true; state->rstateaul.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; for(i=0; i<=ng+nh; i++) { for(j=0; j<=n-1; j++) { state->j.ptr.pp_double[i][j] = state->j.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; } } /* * Store data for estimation of Hessian norm: * * current point (re-scaled) * * gradient of the target function (re-scaled, unmodified) */ ae_v_move(&state->xk1.ptr.p_double[0], 1, &state->auloptimizer.x.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->gk1.ptr.p_double[0], 1, &state->j.ptr.pp_double[0][0], 1, ae_v_len(0,n-1)); /* * Function being optimized */ state->auloptimizer.f = state->fi.ptr.p_double[0]; for(i=0; i<=n-1; i++) { state->auloptimizer.g.ptr.p_double[i] = state->j.ptr.pp_double[0][i]; } /* * Send information to OptGuard monitor */ smoothnessmonitorenqueuepoint(smonitor, &state->auloptimizer.d, state->auloptimizer.stp, &state->auloptimizer.x, &state->fi, &state->j, _state); /* * Penalty for violation of boundary/linear/nonlinear constraints */ minnlc_penaltybc(&state->auloptimizer.x, &state->scaledbndl, &state->hasbndl, &state->scaledbndu, &state->hasbndu, &state->nubc, n, state->rho, state->stabilizingpoint, &state->auloptimizer.f, &state->auloptimizer.g, _state); minnlc_penaltylc(&state->auloptimizer.x, &state->scaledcleic, &state->nulc, n, nec, nic, state->rho, state->stabilizingpoint, &state->auloptimizer.f, &state->auloptimizer.g, _state); minnlc_penaltynlc(&state->fi, &state->j, &state->nunlc, n, ng, nh, state->rho, state->stabilizingpoint, &state->auloptimizer.f, &state->auloptimizer.g, _state); /* * Forward termination request if needed */ if( state->userterminationneeded ) { minlbfgsrequesttermination(&state->auloptimizer, _state); } /* * To optimizer */ goto lbl_6; lbl_8: if( !state->auloptimizer.xupdated ) { goto lbl_10; } /* * Report current point (if needed) */ if( !state->xrep ) { goto lbl_12; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->auloptimizer.x.ptr.p_double[i]*state->s.ptr.p_double[i]; } state->f = state->auloptimizer.f; state->xupdated = ae_true; state->rstateaul.stage = 1; goto lbl_rcomm; lbl_1: state->xupdated = ae_false; lbl_12: /* * Send information to OptGuard monitor */ smoothnessmonitorfinalizelinesearch(smonitor, _state); smoothnessmonitorstartlinesearch(smonitor, &state->auloptimizer.x, &state->fi, &state->j, _state); /* * Forward termination request if needed */ if( state->userterminationneeded ) { minlbfgsrequesttermination(&state->auloptimizer, _state); } /* * Update constraints violation */ checkbcviolation(&state->hasbndl, &state->scaledbndl, &state->hasbndu, &state->scaledbndu, &state->auloptimizer.x, n, &state->s, ae_false, &state->repbcerr, &state->repbcidx, _state); checklcviolation(&state->scaledcleic, &state->lcsrcidx, nec, nic, &state->auloptimizer.x, n, &state->replcerr, &state->replcidx, _state); checknlcviolation(&state->fi, ng, nh, &state->repnlcerr, &state->repnlcidx, _state); /* * Update GammaK */ if( state->xkpresent ) { /* * XK/GK store beginning of current line search, and XK1/GK1 * store data for the end of the line search: * * 
first, we Assert() that XK1 (last point where function * was evaluated) is same as AULOptimizer.X (what is * reported by RComm interface * * calculate step length V2. * * If V2>HessEstTol, then: * * calculate V0 - directional derivative at XK, * and V1 - directional derivative at XK1 * * set GammaK to Max(GammaK, |V1-V0|/V2) */ for(i=0; i<=n-1; i++) { ae_assert(ae_fp_less_eq(ae_fabs(state->auloptimizer.x.ptr.p_double[i]-state->xk1.ptr.p_double[i], _state),100*ae_machineepsilon)||!(ae_isfinite(state->auloptimizer.x.ptr.p_double[i], _state)&&ae_isfinite(state->xk1.ptr.p_double[i], _state)), "MinNLC: integrity check failed, unexpected behavior of LBFGS optimizer", _state); } v2 = 0.0; for(i=0; i<=n-1; i++) { v2 = v2+ae_sqr(state->xk.ptr.p_double[i]-state->xk1.ptr.p_double[i], _state); } v2 = ae_sqrt(v2, _state); if( ae_fp_greater(v2,minnlc_hessesttol) ) { v0 = 0.0; v1 = 0.0; for(i=0; i<=n-1; i++) { v = (state->xk.ptr.p_double[i]-state->xk1.ptr.p_double[i])/v2; v0 = v0+state->gk.ptr.p_double[i]*v; v1 = v1+state->gk1.ptr.p_double[i]*v; } state->gammak = ae_maxreal(state->gammak, ae_fabs(v1-v0, _state)/v2, _state); } } else { /* * Beginning of the first line search, XK is not yet initialized. */ ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xk1.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->gk.ptr.p_double[0], 1, &state->gk1.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->xkpresent = ae_true; } /* * Update preconsitioner using current GammaK */ minnlc_updatepreconditioner(state->prectype, state->updatefreq, &preccounter, &state->auloptimizer, &state->auloptimizer.x, state->rho, state->gammak, &state->scaledbndl, &state->hasbndl, &state->scaledbndu, &state->hasbndu, &state->nubc, &state->scaledcleic, &state->nulc, &state->fi, &state->j, &state->nunlc, &state->bufd, &state->bufc, &state->bufw, &state->bufz, &state->tmp0, n, nec, nic, ng, nh, _state); goto lbl_6; lbl_10: ae_assert(ae_false, "MinNLC: integrity check failed", _state); goto lbl_6; lbl_7: minlbfgsresultsbuf(&state->auloptimizer, &state->xc, &state->aulreport, _state); state->repinneriterationscount = state->repinneriterationscount+state->aulreport.iterationscount; state->repnfev = state->repnfev+state->aulreport.nfev; state->repterminationtype = state->aulreport.terminationtype; inc(&state->repouteriterationscount, _state); if( state->repterminationtype<=0||state->repterminationtype==8 ) { goto lbl_5; } /* * 1. Evaluate F/J * 2. Check for NAN/INF in F/J: we just calculate sum of their * components, it should be enough to reduce vector/matrix to * just one value which either "normal" (all summands were "normal") * or NAN/INF (at least one summand was NAN/INF). * 3. Update Lagrange multipliers */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xc.ptr.p_double[i]*state->s.ptr.p_double[i]; } state->needfij = ae_true; state->rstateaul.stage = 2; goto lbl_rcomm; lbl_2: state->needfij = ae_false; v = 0.0; for(i=0; i<=ng+nh; i++) { v = 0.1*v+state->fi.ptr.p_double[i]; for(j=0; j<=n-1; j++) { v = 0.1*v+state->j.ptr.pp_double[i][j]; } } if( !ae_isfinite(v, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; result = ae_false; return result; } for(i=0; i<=ng+nh; i++) { for(j=0; j<=n-1; j++) { state->j.ptr.pp_double[i][j] = state->j.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; } } for(i=0; i<=n-1; i++) { /* * Process coefficients corresponding to equality-type * constraints. 
*/ if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { minnlcequalitypenaltyfunction((state->xc.ptr.p_double[i]-state->scaledbndl.ptr.p_double[i])*state->rho, &p, &dp, &d2p, _state); state->nubc.ptr.p_double[2*i+0] = boundval(state->nubc.ptr.p_double[2*i+0]-dp, -minnlc_maxlagmult, minnlc_maxlagmult, _state); continue; } /* * Process coefficients corresponding to inequality-type * constraints. These coefficients have limited growth/decay * per iteration which helps to stabilize algorithm. */ ae_assert(ae_fp_greater(minnlc_aulmaxgrowth,1.0), "MinNLC: integrity error", _state); if( state->hasbndl.ptr.p_bool[i] ) { minnlcinequalityshiftfunction((state->xc.ptr.p_double[i]-state->scaledbndl.ptr.p_double[i])*state->rho+1, &p, &dp, &d2p, _state); v = ae_fabs(dp, _state); v = ae_minreal(v, minnlc_aulmaxgrowth, _state); v = ae_maxreal(v, 1/minnlc_aulmaxgrowth, _state); state->nubc.ptr.p_double[2*i+0] = boundval(state->nubc.ptr.p_double[2*i+0]*v, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } if( state->hasbndu.ptr.p_bool[i] ) { minnlcinequalityshiftfunction((state->scaledbndu.ptr.p_double[i]-state->xc.ptr.p_double[i])*state->rho+1, &p, &dp, &d2p, _state); v = ae_fabs(dp, _state); v = ae_minreal(v, minnlc_aulmaxgrowth, _state); v = ae_maxreal(v, 1/minnlc_aulmaxgrowth, _state); state->nubc.ptr.p_double[2*i+1] = boundval(state->nubc.ptr.p_double[2*i+1]*v, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } } for(i=0; i<=nec+nic-1; i++) { v = ae_v_dotproduct(&state->scaledcleic.ptr.pp_double[i][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = v-state->scaledcleic.ptr.pp_double[i][n]; if( i<nec ) { minnlcequalitypenaltyfunction(v*state->rho, &p, &dp, &d2p, _state); state->nulc.ptr.p_double[i] = boundval(state->nulc.ptr.p_double[i]-dp, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } else { minnlcinequalityshiftfunction(-v*state->rho+1, &p, &dp, &d2p, _state); v = ae_fabs(dp, _state); v = ae_minreal(v, minnlc_aulmaxgrowth, _state); v = ae_maxreal(v, 1/minnlc_aulmaxgrowth, _state); state->nulc.ptr.p_double[i] = boundval(state->nulc.ptr.p_double[i]*v, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } } for(i=1; i<=ng+nh; i++) { /* * NOTE: loop index must start from 1, not zero!
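The update applied in this loop mirrors the one used above for boundary and linear
     constraints: equality multipliers receive an additive correction, while inequality
     multipliers are rescaled by a growth factor clipped to [1/AULMaxGrowth, AULMaxGrowth],
     and everything is bounded to [-MaxLagMult, +MaxLagMult]. A condensed sketch
     (illustrative only, not literal ALGLIB code):

         #include <math.h>

         static double update_ineq_multiplier(double nu, double dp,
                                              double maxgrowth, double maxmult)
         {
             double v = fabs(dp);
             if( v>maxgrowth )     v = maxgrowth;      // limit growth per outer iteration
             if( v<1.0/maxgrowth ) v = 1.0/maxgrowth;  // limit decay per outer iteration
             nu = nu*v;
             if( nu>maxmult )  nu = maxmult;           // keep multiplier bounded
             if( nu<-maxmult ) nu = -maxmult;
             return nu;
         }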
*/ v = state->fi.ptr.p_double[i]; if( i<=ng ) { minnlcequalitypenaltyfunction(v*state->rho, &p, &dp, &d2p, _state); state->nunlc.ptr.p_double[i-1] = boundval(state->nunlc.ptr.p_double[i-1]-dp, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } else { minnlcinequalityshiftfunction(-v*state->rho+1, &p, &dp, &d2p, _state); v = ae_fabs(dp, _state); v = ae_minreal(v, minnlc_aulmaxgrowth, _state); v = ae_maxreal(v, 1/minnlc_aulmaxgrowth, _state); state->nunlc.ptr.p_double[i-1] = boundval(state->nunlc.ptr.p_double[i-1]*v, -minnlc_maxlagmult, minnlc_maxlagmult, _state); } } outerit = outerit+1; goto lbl_3; lbl_5: for(i=0; i<=n-1; i++) { state->xc.ptr.p_double[i] = state->xc.ptr.p_double[i]*state->s.ptr.p_double[i]; } result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstateaul.ia.ptr.p_int[0] = n; state->rstateaul.ia.ptr.p_int[1] = nec; state->rstateaul.ia.ptr.p_int[2] = nic; state->rstateaul.ia.ptr.p_int[3] = ng; state->rstateaul.ia.ptr.p_int[4] = nh; state->rstateaul.ia.ptr.p_int[5] = i; state->rstateaul.ia.ptr.p_int[6] = j; state->rstateaul.ia.ptr.p_int[7] = outerit; state->rstateaul.ia.ptr.p_int[8] = preccounter; state->rstateaul.ra.ptr.p_double[0] = v; state->rstateaul.ra.ptr.p_double[1] = vv; state->rstateaul.ra.ptr.p_double[2] = p; state->rstateaul.ra.ptr.p_double[3] = dp; state->rstateaul.ra.ptr.p_double[4] = d2p; state->rstateaul.ra.ptr.p_double[5] = v0; state->rstateaul.ra.ptr.p_double[6] = v1; state->rstateaul.ra.ptr.p_double[7] = v2; return result; } /************************************************************************* Unscales X (converts from scaled variables to original ones), paying special attention to box constraints (output is always feasible; active constraints are mapped to active ones). *************************************************************************/ static void minnlc_unscale(minnlcstate* state, /* Real */ ae_vector* xs, /* Real */ ae_vector* scaledbndl, /* Real */ ae_vector* scaledbndu, /* Real */ ae_vector* xu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&xs->ptr.p_double[i]<=scaledbndl->ptr.p_double[i] ) { xu->ptr.p_double[i] = state->bndl.ptr.p_double[i]; continue; } if( state->hasbndu.ptr.p_bool[i]&&xs->ptr.p_double[i]>=scaledbndu->ptr.p_double[i] ) { xu->ptr.p_double[i] = state->bndu.ptr.p_double[i]; continue; } xu->ptr.p_double[i] = xs->ptr.p_double[i]*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&xu->ptr.p_double[i]<state->bndl.ptr.p_double[i] ) { xu->ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&xu->ptr.p_double[i]>state->bndu.ptr.p_double[i] ) { xu->ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } } void _minnlcstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minnlcstate *p = (minnlcstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->lcsrcidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic);
_rcommstate_init(&p->rstateaul, _state, make_automatic); _rcommstate_init(&p->rstateslp, _state, make_automatic); ae_vector_init(&p->scaledbndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->scaledbndu, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->scaledcleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xstart, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dfbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fm2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fm1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fp2, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dfm1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dfp1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufd, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufw, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufz, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xk1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->gk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->gk1, 0, DT_REAL, _state, make_automatic); _minlbfgsstate_init(&p->auloptimizer, _state, make_automatic); _minlbfgsreport_init(&p->aulreport, _state, make_automatic); ae_vector_init(&p->nubc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nulc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->nunlc, 0, DT_REAL, _state, make_automatic); _minslpstate_init(&p->slpsolverstate, _state, make_automatic); _minsqpstate_init(&p->sqpsolverstate, _state, make_automatic); _smoothnessmonitor_init(&p->smonitor, _state, make_automatic); ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic); } void _minnlcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minnlcstate *dst = (minnlcstate*)_dst; minnlcstate *src = (minnlcstate*)_src; dst->stabilizingpoint = src->stabilizingpoint; dst->initialinequalitymultiplier = src->initialinequalitymultiplier; dst->solvertype = src->solvertype; dst->prectype = src->prectype; dst->updatefreq = src->updatefreq; dst->rho = src->rho; dst->n = src->n; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->aulitscnt = src->aulitscnt; dst->xrep = src->xrep; dst->stpmax = src->stpmax; dst->diffstep = src->diffstep; dst->teststep = src->teststep; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); dst->nec = src->nec; dst->nic = src->nic; ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic); ae_vector_init_copy(&dst->lcsrcidx, &src->lcsrcidx, _state, make_automatic); dst->ng = src->ng; dst->nh = src->nh; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic); ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic); 
dst->needfij = src->needfij; dst->needfi = src->needfi; dst->xupdated = src->xupdated; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); _rcommstate_init_copy(&dst->rstateaul, &src->rstateaul, _state, make_automatic); _rcommstate_init_copy(&dst->rstateslp, &src->rstateslp, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndl, &src->scaledbndl, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndu, &src->scaledbndu, _state, make_automatic); ae_matrix_init_copy(&dst->scaledcleic, &src->scaledcleic, _state, make_automatic); ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); ae_vector_init_copy(&dst->xstart, &src->xstart, _state, make_automatic); ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); ae_vector_init_copy(&dst->fbase, &src->fbase, _state, make_automatic); ae_vector_init_copy(&dst->dfbase, &src->dfbase, _state, make_automatic); ae_vector_init_copy(&dst->fm2, &src->fm2, _state, make_automatic); ae_vector_init_copy(&dst->fm1, &src->fm1, _state, make_automatic); ae_vector_init_copy(&dst->fp1, &src->fp1, _state, make_automatic); ae_vector_init_copy(&dst->fp2, &src->fp2, _state, make_automatic); ae_vector_init_copy(&dst->dfm1, &src->dfm1, _state, make_automatic); ae_vector_init_copy(&dst->dfp1, &src->dfp1, _state, make_automatic); ae_vector_init_copy(&dst->bufd, &src->bufd, _state, make_automatic); ae_vector_init_copy(&dst->bufc, &src->bufc, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_matrix_init_copy(&dst->bufw, &src->bufw, _state, make_automatic); ae_matrix_init_copy(&dst->bufz, &src->bufz, _state, make_automatic); ae_vector_init_copy(&dst->xk, &src->xk, _state, make_automatic); ae_vector_init_copy(&dst->xk1, &src->xk1, _state, make_automatic); ae_vector_init_copy(&dst->gk, &src->gk, _state, make_automatic); ae_vector_init_copy(&dst->gk1, &src->gk1, _state, make_automatic); dst->gammak = src->gammak; dst->xkpresent = src->xkpresent; _minlbfgsstate_init_copy(&dst->auloptimizer, &src->auloptimizer, _state, make_automatic); _minlbfgsreport_init_copy(&dst->aulreport, &src->aulreport, _state, make_automatic); ae_vector_init_copy(&dst->nubc, &src->nubc, _state, make_automatic); ae_vector_init_copy(&dst->nulc, &src->nulc, _state, make_automatic); ae_vector_init_copy(&dst->nunlc, &src->nunlc, _state, make_automatic); dst->userterminationneeded = src->userterminationneeded; _minslpstate_init_copy(&dst->slpsolverstate, &src->slpsolverstate, _state, make_automatic); _minsqpstate_init_copy(&dst->sqpsolverstate, &src->sqpsolverstate, _state, make_automatic); dst->smoothnessguardlevel = src->smoothnessguardlevel; _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic); ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repnfev = src->repnfev; dst->repterminationtype = src->repterminationtype; dst->repbcerr = src->repbcerr; dst->repbcidx = src->repbcidx; dst->replcerr = src->replcerr; dst->replcidx = src->replcidx; dst->repnlcerr = src->repnlcerr; dst->repnlcidx = src->repnlcidx; dst->repdbgphase0its = src->repdbgphase0its; } void _minnlcstate_clear(void* _p) { minnlcstate *p = (minnlcstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_matrix_clear(&p->cleic); 
ae_vector_clear(&p->lcsrcidx); ae_vector_clear(&p->x); ae_vector_clear(&p->fi); ae_matrix_clear(&p->j); _rcommstate_clear(&p->rstate); _rcommstate_clear(&p->rstateaul); _rcommstate_clear(&p->rstateslp); ae_vector_clear(&p->scaledbndl); ae_vector_clear(&p->scaledbndu); ae_matrix_clear(&p->scaledcleic); ae_vector_clear(&p->xc); ae_vector_clear(&p->xstart); ae_vector_clear(&p->xbase); ae_vector_clear(&p->fbase); ae_vector_clear(&p->dfbase); ae_vector_clear(&p->fm2); ae_vector_clear(&p->fm1); ae_vector_clear(&p->fp1); ae_vector_clear(&p->fp2); ae_vector_clear(&p->dfm1); ae_vector_clear(&p->dfp1); ae_vector_clear(&p->bufd); ae_vector_clear(&p->bufc); ae_vector_clear(&p->tmp0); ae_matrix_clear(&p->bufw); ae_matrix_clear(&p->bufz); ae_vector_clear(&p->xk); ae_vector_clear(&p->xk1); ae_vector_clear(&p->gk); ae_vector_clear(&p->gk1); _minlbfgsstate_clear(&p->auloptimizer); _minlbfgsreport_clear(&p->aulreport); ae_vector_clear(&p->nubc); ae_vector_clear(&p->nulc); ae_vector_clear(&p->nunlc); _minslpstate_clear(&p->slpsolverstate); _minsqpstate_clear(&p->sqpsolverstate); _smoothnessmonitor_clear(&p->smonitor); ae_vector_clear(&p->lastscaleused); } void _minnlcstate_destroy(void* _p) { minnlcstate *p = (minnlcstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_matrix_destroy(&p->cleic); ae_vector_destroy(&p->lcsrcidx); ae_vector_destroy(&p->x); ae_vector_destroy(&p->fi); ae_matrix_destroy(&p->j); _rcommstate_destroy(&p->rstate); _rcommstate_destroy(&p->rstateaul); _rcommstate_destroy(&p->rstateslp); ae_vector_destroy(&p->scaledbndl); ae_vector_destroy(&p->scaledbndu); ae_matrix_destroy(&p->scaledcleic); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->xstart); ae_vector_destroy(&p->xbase); ae_vector_destroy(&p->fbase); ae_vector_destroy(&p->dfbase); ae_vector_destroy(&p->fm2); ae_vector_destroy(&p->fm1); ae_vector_destroy(&p->fp1); ae_vector_destroy(&p->fp2); ae_vector_destroy(&p->dfm1); ae_vector_destroy(&p->dfp1); ae_vector_destroy(&p->bufd); ae_vector_destroy(&p->bufc); ae_vector_destroy(&p->tmp0); ae_matrix_destroy(&p->bufw); ae_matrix_destroy(&p->bufz); ae_vector_destroy(&p->xk); ae_vector_destroy(&p->xk1); ae_vector_destroy(&p->gk); ae_vector_destroy(&p->gk1); _minlbfgsstate_destroy(&p->auloptimizer); _minlbfgsreport_destroy(&p->aulreport); ae_vector_destroy(&p->nubc); ae_vector_destroy(&p->nulc); ae_vector_destroy(&p->nunlc); _minslpstate_destroy(&p->slpsolverstate); _minsqpstate_destroy(&p->sqpsolverstate); _smoothnessmonitor_destroy(&p->smonitor); ae_vector_destroy(&p->lastscaleused); } void _minnlcreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minnlcreport *p = (minnlcreport*)_p; ae_touch_ptr((void*)p); } void _minnlcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minnlcreport *dst = (minnlcreport*)_dst; minnlcreport *src = (minnlcreport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->terminationtype = src->terminationtype; dst->bcerr = src->bcerr; dst->bcidx = src->bcidx; dst->lcerr = src->lcerr; dst->lcidx = src->lcidx; dst->nlcerr = src->nlcerr; dst->nlcidx = src->nlcidx; dst->dbgphase0its = src->dbgphase0its; } void _minnlcreport_clear(void* _p) { minnlcreport *p = (minnlcreport*)_p; ae_touch_ptr((void*)p); } void _minnlcreport_destroy(void* _p) { minnlcreport *p = (minnlcreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_MINBC) || 
!defined(AE_PARTIAL_BUILD) /************************************************************************* BOX CONSTRAINED OPTIMIZATION WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to box constraints (with some of box constraints actually being equality ones). This optimizer uses algorithm similar to that of MinBLEIC (optimizer with general linear constraints), but presence of box-only constraints allows us to use faster constraint activation strategies. On large-scale problems, with multiple constraints active at the solution, this optimizer can be several times faster than BLEIC. REQUIREMENTS: * user must provide function value and gradient * starting point X0 must be feasible or not too far away from the feasible set * grad(f) must be Lipschitz continuous on a level set: L = { x : f(x)<=f(x0) } * function must be defined everywhere on the feasible set F USAGE: Constrained optimization is far more complex than the unconstrained one. Here we give a very brief outline of the BC optimizer. We strongly recommend you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide on optimization, which is available at http://www.alglib.net/optimization/ 1. User initializes algorithm state with MinBCCreate() call 2. User adds box constraints by calling MinBCSetBC() function. 3. User sets stopping conditions with MinBCSetCond(). 4. User calls MinBCOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 5. User calls MinBCResults() to get solution 6. Optionally user may call MinBCRestartFrom() to solve another problem with same N but another starting point. MinBCRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbccreate(ae_int_t n, /* Real */ ae_vector* x, minbcstate* state, ae_state *_state) { ae_frame _frame_block; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); _minbcstate_clear(state); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); ae_assert(n>=1, "MinBCCreate: N<1", _state); ae_assert(x->cnt>=n, "MinBCCreate: Length(X)<N", _state); ae_assert(isfinitevector(x, n, _state), "MinBCCreate: X contains infinite or NaN values!", _state); minbc_minbcinitinternal(n, x, 0.0, state, _state); ae_frame_leave(_state); } /************************************************************************* The subroutine is finite difference variant of MinBCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function only. We recommend to read comments on MinBCCreate() in order to get more information about creation of BC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where S[] is scaling vector which can be set by MinBCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good value to start with. 4.
Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation is less robust and precise. The optimizer needs exact gradient values. Imprecise gradient may slow down convergence, especially on highly nonlinear problems. Thus we recommend to use this function for fast prototyping on small-dimensional problems only, and to implement analytical gradient as soon as possible. -- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ void minbccreatef(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minbcstate* state, ae_state *_state) { ae_frame _frame_block; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); _minbcstate_clear(state); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); ae_assert(n>=1, "MinBCCreateF: N<1", _state); ae_assert(x->cnt>=n, "MinBCCreateF: Length(X)<N", _state); ae_assert(isfinitevector(x, n, _state), "MinBCCreateF: X contains infinite or NaN values!", _state); ae_assert(ae_isfinite(diffstep, _state), "MinBCCreateF: DiffStep is infinite or NaN!", _state); ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinBCCreateF: DiffStep is non-positive!", _state); minbc_minbcinitinternal(n, x, diffstep, state, _state); ae_frame_leave(_state); } /************************************************************************* This function sets boundary constraints for BC optimizer. Boundary constraints are inactive by default (after initial creation). They are preserved after algorithm restart with MinBCRestartFrom(). INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints, even when numerical differentiation is used (algorithm adjusts nodes according to boundary constraints) -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetbc(minbcstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->nmain; ae_assert(bndl->cnt>=n, "MinBCSetBC: Length(BndL)<N", _state); ae_assert(bndu->cnt>=n, "MinBCSetBC: Length(BndU)<N", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinBCSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinBCSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } } /************************************************************************* This function sets stopping conditions for the optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsG - >=0 The subroutine finishes its work if the condition |v|<EpsG is satisfied, where: * |.| means Euclidean norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient * s - scaling coefficients set by MinBCSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} is satisfied. EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinBCSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection. NOTE: when SetCond() called with non-zero MaxIts, BC solver may perform slightly more than MaxIts iterations. I.e., MaxIts sets non-strict limit on iterations count.
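For intuition, the three tolerances correspond to the following scaled tests (a standalone
sketch, not the solver's internal routine; S[] is the scaling set by MinBCSetScale()):

    #include <math.h>

    static int should_stop(const double *g, const double *dx, const double *s,
                           double fprev, double fcur, int n,
                           double epsg, double epsf, double epsx)
    {
        double vg = 0.0, vx = 0.0, fscale;
        for(int i=0; i<n; i++)
        {
            vg += (g[i]*s[i])*(g[i]*s[i]);     // scaled gradient norm, squared
            vx += (dx[i]/s[i])*(dx[i]/s[i]);   // scaled step norm, squared
        }
        fscale = fmax(fmax(fabs(fprev), fabs(fcur)), 1.0);
        return sqrt(vg)<=epsg
            || fabs(fcur-fprev)<=epsf*fscale
            || sqrt(vx)<=epsx;
    }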
-- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetcond(minbcstate* state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinBCSetCond: EpsG is not finite number", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinBCSetCond: negative EpsG", _state); ae_assert(ae_isfinite(epsf, _state), "MinBCSetCond: EpsF is not finite number", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinBCSetCond: negative EpsF", _state); ae_assert(ae_isfinite(epsx, _state), "MinBCSetCond: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinBCSetCond: negative EpsX", _state); ae_assert(maxits>=0, "MinBCSetCond: negative MaxIts!", _state); if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 ) { epsx = 1.0E-6; } state->epsg = epsg; state->epsf = epsf; state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function sets scaling coefficients for BC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. In most optimizers (and in the BC too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set preconditioner by separate call to one of the MinBCSetPrec...() functions. There is a special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related forms of tuning the solver. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minbcsetscale(minbcstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->nmain, "MinBCSetScale: Length(S)<N", _state); for(i=0; i<=state->nmain-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinBCSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinBCSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecdefault(minbcstate* state, ae_state *_state) { state->prectype = 0; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used.
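The effect of a diagonal preconditioner H = diag(D) is simply a componentwise rescaling of
the gradient when the search direction is formed (sketch only, assuming all D[i] are
positive; not an ALGLIB routine):

    static void preconditioned_direction(const double *g, const double *d,
                                         int n, double *dir)
    {
        for(int i=0; i<n; i++)
            dir[i] = -g[i]/d[i];   // divide by the Hessian diagonal, NOT by its inverse
    }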
INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE 1: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecdiag(minbcstate* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(d->cnt>=state->nmain, "MinBCSetPrecDiag: D is too short", _state); for(i=0; i<=state->nmain-1; i++) { ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinBCSetPrecDiag: D contains infinite or NAN elements", _state); ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "MinBCSetPrecDiag: D contains non-positive elements", _state); } rvectorsetlengthatleast(&state->diagh, state->nmain, _state); state->prectype = 2; for(i=0; i<=state->nmain-1; i++) { state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; } } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. IMPRTANT: you should set scale of your variables with MinBCSetScale() call (before or after MinBCSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetprecscale(minbcstate* state, ae_state *_state) { state->prectype = 3; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinBCOptimize(). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetxrep(minbcstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. 
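Schematically, a positive StpMax simply clips the trial step before the target is evaluated
at x+stp*d (illustrative sketch, using the same kind of scaled direction norm the solver
works with internally):

    #include <math.h>

    static double clip_step(double stp, double stpmax,
                            const double *d, const double *s, int n)
    {
        double dnorm = 0.0;
        for(int i=0; i<n; i++)
            dnorm += (d[i]/s[i])*(d[i]/s[i]);   // scaled direction norm, squared
        dnorm = sqrt(dnorm);
        if( stpmax>0.0 && dnorm>0.0 && stp*dnorm>stpmax )
            stp = stpmax/dnorm;                 // never step farther than StpMax
        return stp;
    }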
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minbcsetstpmax(minbcstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinBCSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinBCSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied gradient, and one which uses function value only and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object (either MinBCCreate() for analytical gradient or MinBCCreateF() for numerical differentiation) you should choose appropriate variant of MinBCOptimize() - one which accepts function AND gradient or one which accepts function ONLY. Be careful to choose variant of MinBCOptimize() which corresponds to your optimization scheme! Table below lists different combinations of callback (function/gradient) passed to MinBCOptimize() and specific function used to create optimizer. | USER PASSED TO MinBCOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ MinBCCreateF() | works FAILS MinBCCreate() | FAILS works Here "FAIL" denotes inappropriate combinations of optimizer creation function and MinBCOptimize() version. Attemps to use such combination (for example, to create optimizer with MinBCCreateF() and to pass gradient information to MinCGOptimize()) will lead to exception being thrown. Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ ae_bool minbciteration(minbcstate* state, ae_state *_state) { ae_int_t freezeidx; double freezeval; double scaleddnorm; ae_int_t n; ae_int_t m; ae_int_t i; ae_int_t j; double v; double vv; double v0; ae_bool b; ae_int_t mcinfo; ae_int_t itidx; double ginit; double gdecay; ae_bool activationstatus; double activationstep; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { freezeidx = state->rstate.ia.ptr.p_int[0]; n = state->rstate.ia.ptr.p_int[1]; m = state->rstate.ia.ptr.p_int[2]; i = state->rstate.ia.ptr.p_int[3]; j = state->rstate.ia.ptr.p_int[4]; mcinfo = state->rstate.ia.ptr.p_int[5]; itidx = state->rstate.ia.ptr.p_int[6]; b = state->rstate.ba.ptr.p_bool[0]; activationstatus = state->rstate.ba.ptr.p_bool[1]; freezeval = state->rstate.ra.ptr.p_double[0]; scaleddnorm = state->rstate.ra.ptr.p_double[1]; v = state->rstate.ra.ptr.p_double[2]; vv = state->rstate.ra.ptr.p_double[3]; v0 = state->rstate.ra.ptr.p_double[4]; ginit = state->rstate.ra.ptr.p_double[5]; gdecay = state->rstate.ra.ptr.p_double[6]; activationstep = state->rstate.ra.ptr.p_double[7]; } else { freezeidx = 359; n = -58; m = -919; i = -909; j = 81; mcinfo = 255; itidx = 74; b = ae_false; activationstatus = ae_true; freezeval = 205; scaleddnorm = -838; v = 939; vv = -526; v0 = 763; ginit = -541; gdecay = -698; activationstep = -900; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } if( state->rstate.stage==15 ) { goto lbl_15; } if( state->rstate.stage==16 ) { goto lbl_16; } if( state->rstate.stage==17 ) { goto lbl_17; } if( state->rstate.stage==18 ) { goto lbl_18; } if( state->rstate.stage==19 ) { goto lbl_19; } if( state->rstate.stage==20 ) { goto lbl_20; } if( state->rstate.stage==21 ) { goto lbl_21; } if( state->rstate.stage==22 ) { goto lbl_22; } if( state->rstate.stage==23 ) { goto lbl_23; } if( state->rstate.stage==24 ) { goto lbl_24; } if( state->rstate.stage==25 ) { goto lbl_25; } if( state->rstate.stage==26 ) { goto lbl_26; } if( state->rstate.stage==27 ) { goto lbl_27; } if( state->rstate.stage==28 ) { goto lbl_28; } if( state->rstate.stage==29 ) { goto lbl_29; } /* * Routine body */ /* * Algorithm parameters: * * M number of L-BFGS corrections. * This coefficient remains fixed during iterations. * * GDecay desired decrease of constrained gradient during L-BFGS iterations. * This coefficient is decreased after each L-BFGS round until * it reaches minimum decay. 
*/ m = ae_minint(5, state->nmain, _state); gdecay = minbc_initialdecay; /* * Init */ n = state->nmain; for(i=0; i<=n-1; i++) { state->xc.ptr.p_double[i] = state->xstart.ptr.p_double[i]; } if( !enforceboundaryconstraints(&state->xc, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, 0, _state) ) { /* * Inconsistent constraints */ state->repterminationtype = -3; result = ae_false; return result; } state->userterminationneeded = ae_false; state->repterminationtype = 0; state->repiterationscount = 0; state->repnfev = 0; state->repvaridx = -1; rmatrixsetlengthatleast(&state->bufyk, m+1, n, _state); rmatrixsetlengthatleast(&state->bufsk, m+1, n, _state); rvectorsetlengthatleast(&state->bufrho, m, _state); rvectorsetlengthatleast(&state->buftheta, m, _state); rvectorsetlengthatleast(&state->tmp0, n, _state); smoothnessmonitorinit(&state->smonitor, n, 1, state->smoothnessguardlevel>0, _state); for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; state->invs.ptr.p_double[i] = 1/state->s.ptr.p_double[i]; } /* * Fill TmpPrec with current preconditioner */ rvectorsetlengthatleast(&state->tmpprec, n, _state); for(i=0; i<=n-1; i++) { if( state->prectype==2 ) { state->tmpprec.ptr.p_double[i] = 1/state->diagh.ptr.p_double[i]; continue; } if( state->prectype==3 ) { state->tmpprec.ptr.p_double[i] = ae_sqr(state->s.ptr.p_double[i], _state); continue; } state->tmpprec.ptr.p_double[i] = (double)(1); } /* * Check correctness of user-supplied gradient */ minbc_clearrequestfields(state, _state); if( !(ae_fp_eq(state->diffstep,(double)(0))&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_30; } lbl_32: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xc, &state->s, &state->bndl, &state->bndu, ae_true, state->teststep, _state) ) { goto lbl_33; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfg = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfg = ae_false; state->smonitor.fi.ptr.p_double[0] = state->f; for(i=0; i<=n-1; i++) { state->smonitor.j.ptr.pp_double[0][i] = state->g.ptr.p_double[i]; } goto lbl_32; lbl_33: lbl_30: /* * Main cycle of BC-PG algorithm */ state->repterminationtype = 0; state->lastscaledgoodstep = (double)(0); state->nonmonotoniccnt = ae_round(1.5*n, _state)+5; ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbc_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_34; } state->needfg = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needfg = ae_false; goto lbl_35; lbl_34: state->needf = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->needf = ae_false; lbl_35: state->fc = state->f; trimprepare(state->f, &state->trimthreshold, _state); state->repnfev = state->repnfev+1; if( !state->xrep ) { goto lbl_36; } /* * Report current point */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fc; state->xupdated = ae_true; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->xupdated = ae_false; lbl_36: if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; result = ae_false; return result; } lbl_38: if( ae_false ) { goto lbl_39; } /* * Steepest descent phase * * (a) calculate unconstrained gradient * (b) check F/G for NAN/INF, abnormally terminate algorithm if needed * (c) perform one steepest descent step, activating only those constraints * 
which prevent us from moving outside of box-constrained area */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbc_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_40; } /* * Analytic gradient */ state->needfg = ae_true; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->needfg = ae_false; goto lbl_41; lbl_40: /* * Numerical differentiation */ state->needf = ae_true; state->rstate.stage = 5; goto lbl_rcomm; lbl_5: state->fbase = state->f; i = 0; lbl_42: if( i>n-1 ) { goto lbl_44; } v = state->x.ptr.p_double[i]; b = ae_false; if( state->hasbndl.ptr.p_bool[i] ) { b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); } if( state->hasbndu.ptr.p_bool[i] ) { b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); } if( b ) { goto lbl_45; } state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->fp2 = state->f; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); goto lbl_46; lbl_45: state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) { state->xm1 = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) { state->xp1 = state->bndu.ptr.p_double[i]; } state->x.ptr.p_double[i] = state->xm1; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->fm1 = state->f; state->x.ptr.p_double[i] = state->xp1; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->fp1 = state->f; if( ae_fp_neq(state->xm1,state->xp1) ) { state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); } else { state->g.ptr.p_double[i] = (double)(0); } lbl_46: state->x.ptr.p_double[i] = v; i = i+1; goto lbl_42; lbl_44: state->f = state->fbase; state->needf = ae_false; lbl_41: state->fc = state->f; ae_v_move(&state->ugc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); projectgradientintobc(&state->xc, &state->cgc, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, 0, _state); ginit = 0.0; for(i=0; i<=n-1; i++) { ginit = ginit+ae_sqr(state->cgc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } ginit = ae_sqrt(ginit, _state); if( !ae_isfinite(ginit, _state)||!ae_isfinite(state->fc, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; result = ae_false; return result; } if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; result = ae_false; return result; } if( ae_fp_less_eq(ginit,state->epsg) ) { /* * Gradient is small enough. 
* Optimization is terminated */ state->repterminationtype = 4; result = ae_false; return result; } for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = -state->tmpprec.ptr.p_double[i]*state->cgc.ptr.p_double[i]; } scaleddnorm = (double)(0); for(i=0; i<=n-1; i++) { scaleddnorm = scaleddnorm+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } scaleddnorm = ae_sqrt(scaleddnorm, _state); ae_assert(ae_fp_greater(scaleddnorm,(double)(0)), "MinBC: integrity check failed", _state); if( ae_fp_greater(state->lastscaledgoodstep,(double)(0)) ) { state->stp = state->lastscaledgoodstep/scaleddnorm; } else { state->stp = 1.0/scaleddnorm; } calculatestepbound(&state->xc, &state->d, 1.0, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, 0, &freezeidx, &freezeval, &state->curstpmax, _state); activationstep = state->curstpmax; if( freezeidx<0||ae_fp_greater(state->curstpmax,1.0E50) ) { state->curstpmax = 1.0E50; } if( ae_fp_greater(state->stpmax,(double)(0)) ) { state->curstpmax = ae_minreal(state->curstpmax, state->stpmax/scaleddnorm, _state); } ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgn.ptr.p_double[0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugn.ptr.p_double[0], 1, &state->ugc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fn = state->fc; state->mcstage = 0; smoothnessmonitorstartlinesearch1u(&state->smonitor, &state->s, &state->invs, &state->xn, state->fn, &state->ugn, _state); mcsrch(n, &state->xn, &state->fn, &state->cgn, &state->d, &state->stp, state->curstpmax, minbc_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); lbl_47: if( state->mcstage==0 ) { goto lbl_48; } /* * Copy XN to X, perform on-the-fly correction w.r.t box * constraints (projection onto feasible set). 
*/ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xn.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->x.ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->x.ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } /* * Gradient, either user-provided or numerical differentiation */ minbc_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_49; } /* * Analytic gradient */ state->needfg = ae_true; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->needfg = ae_false; state->repnfev = state->repnfev+1; goto lbl_50; lbl_49: /* * Numerical differentiation */ state->needf = ae_true; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->fbase = state->f; i = 0; lbl_51: if( i>n-1 ) { goto lbl_53; } v = state->x.ptr.p_double[i]; b = ae_false; if( state->hasbndl.ptr.p_bool[i] ) { b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); } if( state->hasbndu.ptr.p_bool[i] ) { b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); } if( b ) { goto lbl_54; } state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 15; goto lbl_rcomm; lbl_15: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 16; goto lbl_rcomm; lbl_16: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 17; goto lbl_rcomm; lbl_17: state->fp2 = state->f; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); state->repnfev = state->repnfev+4; goto lbl_55; lbl_54: state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) { state->xm1 = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) { state->xp1 = state->bndu.ptr.p_double[i]; } state->x.ptr.p_double[i] = state->xm1; state->rstate.stage = 18; goto lbl_rcomm; lbl_18: state->fm1 = state->f; state->x.ptr.p_double[i] = state->xp1; state->rstate.stage = 19; goto lbl_rcomm; lbl_19: state->fp1 = state->f; if( ae_fp_neq(state->xm1,state->xp1) ) { state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); } else { state->g.ptr.p_double[i] = (double)(0); } state->repnfev = state->repnfev+2; lbl_55: state->x.ptr.p_double[i] = v; i = i+1; goto lbl_51; lbl_53: state->f = state->fbase; state->needf = ae_false; lbl_50: /* * Back to MCSRCH */ smoothnessmonitorenqueuepoint1u(&state->smonitor, &state->s, &state->invs, &state->d, state->stp, &state->x, state->f, &state->g, _state); trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); state->fn = state->f; ae_v_move(&state->cgn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->d.ptr.p_double[i],(double)(0)) ) { state->cgn.ptr.p_double[i] = (double)(0); } } mcsrch(n, &state->xn, 
        &state->fn, &state->cgn, &state->d, &state->stp, state->curstpmax, minbc_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state);
    goto lbl_47;
lbl_48:
    smoothnessmonitorfinalizelinesearch(&state->smonitor, _state);
    v = state->fn;
    for(i=0; i<=n-1; i++)
    {
        v = 0.1*v+state->ugn.ptr.p_double[i];
    }
    if( !ae_isfinite(v, _state) )
    {

        /*
         * Abnormal termination - infinities in function/gradient
         */
        state->repterminationtype = -8;
        result = ae_false;
        return result;
    }
    if( mcinfo!=1&&mcinfo!=5 )
    {

        /*
         * We cannot find a step which decreases the function value. We have
         * two possibilities:
         * (a) numerical properties of the function do not allow us to
         *     find a good step.
         * (b) we are close to activation of some constraint, and it is
         *     so close that the step which activates it leads to a change in
         *     the target function which is smaller than the numerical noise.
         *
         * The optimization algorithm must be able to handle case (b), because
         * inability to handle it will cause failure when the algorithm is
         * started very close to the boundary of the feasible area.
         *
         * In order to correctly handle such cases we allow a limited number
         * of small steps which increase the function value.
         */
        if( (freezeidx>=0&&ae_fp_less_eq(scaleddnorm*state->curstpmax,minbc_maxnonmonotoniclen))&&state->nonmonotoniccnt>0 )
        {

            /*
             * We enforce a non-monotonic step:
             * * Stp := CurStpMax
             * * MCINFO := 5
             * * XN := XC+CurStpMax*D
             * * the non-monotonic counter is decreased
             *
             * NOTE: UGN/CGN are not updated because the step is so short that we
             *       assume that GN is approximately equal to GC.
             */
            state->stp = state->curstpmax;
            mcinfo = 5;
            v = state->curstpmax;
            ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1));
            ae_v_addd(&state->xn.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), v);
            state->nonmonotoniccnt = state->nonmonotoniccnt-1;
        }
        else
        {

            /*
             * Numerical properties of the function do not allow
             * us to solve the problem.
Algorithm is terminated */ state->repterminationtype = 7; result = ae_false; return result; } } if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; result = ae_false; return result; } ae_assert(mcinfo!=5||ae_fp_eq(state->stp,state->curstpmax), "MinBC: integrity check failed", _state); postprocessboundedstep(&state->xn, &state->xc, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, 0, freezeidx, freezeval, state->stp, activationstep, _state); state->fp = state->fc; state->fc = state->fn; ae_v_move(&state->xp.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->xc.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgc.ptr.p_double[0], 1, &state->cgn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugc.ptr.p_double[0], 1, &state->ugn.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !state->xrep ) { goto lbl_56; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbc_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 20; goto lbl_rcomm; lbl_20: state->xupdated = ae_false; lbl_56: state->repiterationscount = state->repiterationscount+1; if( mcinfo==1 ) { v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr((state->xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( ae_fp_less_eq(v,state->epsx) ) { /* * Step is small enough */ state->repterminationtype = 2; result = ae_false; return result; } if( ae_fp_less_eq(ae_fabs(state->fp-state->fc, _state),state->epsf*ae_maxreal(ae_fabs(state->fc, _state), ae_maxreal(ae_fabs(state->fp, _state), 1.0, _state), _state)) ) { /* * Function change is small enough */ state->repterminationtype = 1; result = ae_false; return result; } } if( state->maxits>0&&state->repiterationscount>=state->maxits ) { /* * Iteration counter exceeded limit */ state->repterminationtype = 5; result = ae_false; return result; } /* * LBFGS stage: * * during LBFGS iterations we activate new constraints, but never * deactivate already active ones. * * we perform at most N iterations of LBFGS before re-evaluating * active set and restarting LBFGS. * * About termination: * * LBFGS iterations can be terminated because of two reasons: * * "termination" - non-zero termination code in RepTerminationType, * which means that optimization is done * * "restart" - zero RepTerminationType, which means that we * have to re-evaluate active set and resume LBFGS stage. 
* * one more option is "refresh" - to continue LBFGS iterations, * but with all BFGS updates (Sk/Yk pairs) being dropped; * it happens after changes in active set */ ginit = 0.0; for(i=0; i<=n-1; i++) { state->cgc.ptr.p_double[i] = state->ugc.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->cgc.ptr.p_double[i] = (double)(0); } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cgc.ptr.p_double[i] = (double)(0); } ginit = ginit+ae_sqr(state->cgc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } ginit = ae_sqrt(ginit, _state); state->bufsize = 0; itidx = 0; lbl_58: if( itidx>n-1 ) { goto lbl_60; } /* * At the beginning of each iteration: * * XC stores current point * * FC stores current function value * * UGC stores current unconstrained gradient * * CGC stores current constrained gradient * * D stores constrained step direction (calculated at this block) * * 1. Calculate search direction D according to L-BFGS algorithm * using constrained preconditioner to perform inner multiplication. * 2. Evaluate scaled length of direction D; restart LBFGS if D is zero * (it may be possible that we found minimum, but it is also possible * that some constraints need deactivation) * 3. If D is non-zero, try to use previous scaled step length as initial estimate for new step. * 4. Calculate bound on step length. */ ae_v_move(&state->work.ptr.p_double[0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=state->bufsize-1; i>=0; i--) { v = ae_v_dotproduct(&state->bufsk.ptr.pp_double[i][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->buftheta.ptr.p_double[i] = v; vv = v*state->bufrho.ptr.p_double[i]; ae_v_subd(&state->work.ptr.p_double[0], 1, &state->bufyk.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), vv); } for(i=0; i<=n-1; i++) { state->work.ptr.p_double[i] = state->tmpprec.ptr.p_double[i]*state->work.ptr.p_double[i]; } for(i=0; i<=state->bufsize-1; i++) { v = ae_v_dotproduct(&state->bufyk.ptr.pp_double[i][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); vv = state->bufrho.ptr.p_double[i]*(-v+state->buftheta.ptr.p_double[i]); ae_v_addd(&state->work.ptr.p_double[0], 1, &state->bufsk.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), vv); } ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); b = ae_false; for(i=0; i<=n-1; i++) { b = b||((state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_neq(state->d.ptr.p_double[i],(double)(0))); b = b||((state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_neq(state->d.ptr.p_double[i],(double)(0))); } ae_assert(!b, "MinBC: integrity check failed (q)", _state); scaleddnorm = (double)(0); for(i=0; i<=n-1; i++) { scaleddnorm = scaleddnorm+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } scaleddnorm = ae_sqrt(scaleddnorm, _state); if( ae_fp_eq(scaleddnorm,(double)(0)) ) { /* * Search direction is zero. * Skip back to steepest descent phase. */ goto lbl_60; } if( ae_fp_greater(state->lastscaledgoodstep,(double)(0)) ) { state->stp = state->lastscaledgoodstep/scaleddnorm; } else { state->stp = 1.0/scaleddnorm; } state->curstpmax = 1.0E50; if( ae_fp_greater(state->stpmax,(double)(0)) ) { state->curstpmax = ae_minreal(state->curstpmax, state->stpmax/scaleddnorm, _state); } /* * Minimize G(t) = F(CONSTRAIN(XC + t*D)), with t being scalar, XC and D being vectors. 
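     *
     * Editorial note: here CONSTRAIN() denotes the componentwise box projection
     * performed by the loop just below this comment: each component of XC+t*D is
     * clipped into [BndL[i],BndU[i]] whenever the corresponding bound is present,
     * and left unchanged otherwise.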
*/ ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgn.ptr.p_double[0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ugn.ptr.p_double[0], 1, &state->ugc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fn = state->fc; state->mcstage = 0; smoothnessmonitorstartlinesearch1u(&state->smonitor, &state->s, &state->invs, &state->xn, state->fn, &state->ugn, _state); mcsrch(n, &state->xn, &state->fn, &state->cgn, &state->d, &state->stp, state->curstpmax, minbc_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); lbl_61: if( state->mcstage==0 ) { goto lbl_62; } /* * Copy XN to X, perform on-the-fly correction w.r.t box * constraints (projection onto feasible set). */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xn.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->x.ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->x.ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } /* * Gradient, either user-provided or numerical differentiation */ minbc_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_63; } /* * Analytic gradient */ state->needfg = ae_true; state->rstate.stage = 21; goto lbl_rcomm; lbl_21: state->needfg = ae_false; state->repnfev = state->repnfev+1; goto lbl_64; lbl_63: /* * Numerical differentiation */ state->needf = ae_true; state->rstate.stage = 22; goto lbl_rcomm; lbl_22: state->fbase = state->f; i = 0; lbl_65: if( i>n-1 ) { goto lbl_67; } v = state->x.ptr.p_double[i]; b = ae_false; if( state->hasbndl.ptr.p_bool[i] ) { b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); } if( state->hasbndu.ptr.p_bool[i] ) { b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); } if( b ) { goto lbl_68; } state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 23; goto lbl_rcomm; lbl_23: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 24; goto lbl_rcomm; lbl_24: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 25; goto lbl_rcomm; lbl_25: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 26; goto lbl_rcomm; lbl_26: state->fp2 = state->f; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); state->repnfev = state->repnfev+4; goto lbl_69; lbl_68: state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) { state->xm1 = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) { state->xp1 = state->bndu.ptr.p_double[i]; } state->x.ptr.p_double[i] = state->xm1; state->rstate.stage = 27; goto lbl_rcomm; lbl_27: state->fm1 = state->f; state->x.ptr.p_double[i] = state->xp1; state->rstate.stage = 28; goto lbl_rcomm; lbl_28: state->fp1 = state->f; if( ae_fp_neq(state->xm1,state->xp1) ) { state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); } else { 
state->g.ptr.p_double[i] = (double)(0); } state->repnfev = state->repnfev+2; lbl_69: state->x.ptr.p_double[i] = v; i = i+1; goto lbl_65; lbl_67: state->f = state->fbase; state->needf = ae_false; lbl_64: /* * Back to MCSRCH */ smoothnessmonitorenqueuepoint1u(&state->smonitor, &state->s, &state->invs, &state->d, state->stp, &state->x, state->f, &state->g, _state); trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); state->fn = state->f; for(i=0; i<=n-1; i++) { state->ugn.ptr.p_double[i] = state->g.ptr.p_double[i]; state->cgn.ptr.p_double[i] = state->g.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->cgn.ptr.p_double[i] = (double)(0); } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->cgn.ptr.p_double[i] = (double)(0); } } mcsrch(n, &state->xn, &state->fn, &state->cgn, &state->d, &state->stp, state->curstpmax, minbc_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); goto lbl_61; lbl_62: smoothnessmonitorfinalizelinesearch(&state->smonitor, _state); for(i=0; i<=n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->xn.ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->xn.ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } ae_v_moveneg(&state->bufsk.ptr.pp_double[state->bufsize][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_moveneg(&state->bufyk.ptr.pp_double[state->bufsize][0], 1, &state->cgc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->bufsk.ptr.pp_double[state->bufsize][0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->bufyk.ptr.pp_double[state->bufsize][0], 1, &state->cgn.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Handle special situations: * * check for presence of NAN/INF in function/gradient * * handle failure of line search */ v = state->fn; for(i=0; i<=n-1; i++) { v = 0.1*v+state->ugn.ptr.p_double[i]; } if( !ae_isfinite(v, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; result = ae_false; return result; } if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; result = ae_false; return result; } if( mcinfo!=1 ) { /* * Terminate LBFGS phase */ goto lbl_60; } /* * Current point is updated: * * move XC/FC/GC to XP/FP/GP * * move XN/FN/GN to XC/FC/GC * * report current point and update iterations counter * * push new pair SK/YK to LBFGS buffer * * update length of the good step */ activationstatus = ae_false; for(i=0; i<=n-1; i++) { if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_neq(state->xn.ptr.p_double[i],state->xc.ptr.p_double[i]) ) { activationstatus = ae_true; } if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_neq(state->xn.ptr.p_double[i],state->xc.ptr.p_double[i]) ) { activationstatus = ae_true; } } state->fp = state->fc; state->fc = state->fn; ae_v_move(&state->xp.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->xc.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->cgc.ptr.p_double[0], 1, &state->cgn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 
ae_v_move(&state->ugc.ptr.p_double[0], 1, &state->ugn.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !state->xrep ) { goto lbl_70; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minbc_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 29; goto lbl_rcomm; lbl_29: state->xupdated = ae_false; lbl_70: state->repiterationscount = state->repiterationscount+1; if( state->bufsize==m ) { /* * Buffer is full, shift contents by one row */ for(i=0; i<=state->bufsize-1; i++) { ae_v_move(&state->bufsk.ptr.pp_double[i][0], 1, &state->bufsk.ptr.pp_double[i+1][0], 1, ae_v_len(0,n-1)); ae_v_move(&state->bufyk.ptr.pp_double[i][0], 1, &state->bufyk.ptr.pp_double[i+1][0], 1, ae_v_len(0,n-1)); } for(i=0; i<=state->bufsize-2; i++) { state->bufrho.ptr.p_double[i] = state->bufrho.ptr.p_double[i+1]; state->buftheta.ptr.p_double[i] = state->buftheta.ptr.p_double[i+1]; } } else { /* * Buffer is not full, increase buffer size by 1 */ state->bufsize = state->bufsize+1; } v = ae_v_dotproduct(&state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, &state->bufsk.ptr.pp_double[state->bufsize-1][0], 1, ae_v_len(0,n-1)); vv = ae_v_dotproduct(&state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, &state->bufyk.ptr.pp_double[state->bufsize-1][0], 1, ae_v_len(0,n-1)); if( ae_fp_eq(v,(double)(0))||ae_fp_eq(vv,(double)(0)) ) { /* * Strange internal error in LBFGS - either YK=0 * (which should not have been) or (SK,YK)=0 (again, * unexpected). It should not take place because * MCINFO=1, which signals "good" step. But just * to be sure we have special branch of code which * restarts LBFGS */ goto lbl_60; } state->bufrho.ptr.p_double[state->bufsize-1] = 1/v; ae_assert(state->bufsize<=m, "MinBC: internal error", _state); v = (double)(0); vv = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr((state->xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); vv = vv+ae_sqr(state->xc.ptr.p_double[i]-state->xp.ptr.p_double[i], _state); } minbc_updateestimateofgoodstep(&state->lastscaledgoodstep, ae_sqrt(v, _state), _state); /* * Check MaxIts-based stopping condition. */ if( state->maxits>0&&state->repiterationscount>=state->maxits ) { state->repterminationtype = 5; result = ae_false; return result; } /* * Smooth reset (LBFGS memory model is refreshed) or hard restart: * * LBFGS model is refreshed, if line search was performed with activation of constraints * * algorithm is restarted if scaled gradient decreased below GDecay */ if( activationstatus ) { state->bufsize = 0; goto lbl_59; } v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->cgc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( ae_fp_less(ae_sqrt(v, _state),gdecay*ginit) ) { goto lbl_60; } lbl_59: itidx = itidx+1; goto lbl_58; lbl_60: /* * Decrease decay coefficient. Subsequent L-BFGS stages will * have more stringent stopping criteria. 
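     *
     * Editorial note: in effect the coefficient follows
     *     GDecay := max(GDecay*minbc_decaycorrection, minbc_mindecay)
     * each time this point is reached (see the update statement just below),
     * i.e. it shrinks geometrically until it hits the minimum decay.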
*/ gdecay = ae_maxreal(gdecay*minbc_decaycorrection, minbc_mindecay, _state); goto lbl_38; lbl_39: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = freezeidx; state->rstate.ia.ptr.p_int[1] = n; state->rstate.ia.ptr.p_int[2] = m; state->rstate.ia.ptr.p_int[3] = i; state->rstate.ia.ptr.p_int[4] = j; state->rstate.ia.ptr.p_int[5] = mcinfo; state->rstate.ia.ptr.p_int[6] = itidx; state->rstate.ba.ptr.p_bool[0] = b; state->rstate.ba.ptr.p_bool[1] = activationstatus; state->rstate.ra.ptr.p_double[0] = freezeval; state->rstate.ra.ptr.p_double[1] = scaleddnorm; state->rstate.ra.ptr.p_double[2] = v; state->rstate.ra.ptr.p_double[3] = vv; state->rstate.ra.ptr.p_double[4] = v0; state->rstate.ra.ptr.p_double[5] = ginit; state->rstate.ra.ptr.p_double[6] = gdecay; state->rstate.ra.ptr.p_double[7] = activationstep; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with minbcoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minbcsetscale(). 
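A minimal activation sketch (an editorial illustration only, not part of the
generated reference; it assumes the alglib C++ wrappers exposed by this library
- minbccreate(), minbcoptguardgradient() - and the 0.001 test step and starting
point are arbitrary examples):

    alglib::real_1d_array x = "[1.0, 1.0]";
    alglib::minbcstate state;
    alglib::minbccreate(x, state);
    alglib::minbcoptguardgradient(state, 0.001);  // verify user gradient with TestStep=0.001
    // ... set bounds/conditions and call minbcoptimize() as usual ...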
=== EXPLANATION ==========================================================

In order to verify the gradient, the algorithm performs the following steps:
  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
    where X[i] is the i-th component of the initial point and S[i] is the
    scale of the i-th parameter
  * F(X) is evaluated at these trial points
  * we perform one more evaluation at the midpoint of the interval
  * we build a cubic model using function values and derivatives at the
    trial points and compare its prediction with the actual value at the
    midpoint

  -- ALGLIB --
     Copyright 15.06.2014 by Bochkanov Sergey
*************************************************************************/
void minbcoptguardgradient(minbcstate* state,
     double teststep,
     ae_state *_state)
{

    ae_assert(ae_isfinite(teststep, _state), "MinBCOptGuardGradient: TestStep contains NaN or INF", _state);
    ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinBCOptGuardGradient: invalid argument TestStep(TestStep<0)", _state);
    state->teststep = teststep;
}


/*************************************************************************
This function activates/deactivates the nonsmoothness monitoring option of
the OptGuard integrity checker. The smoothness monitor silently observes
the solution process and tries to detect ill-posed problems, i.e. ones with:
a) discontinuous target function (non-C0)
b) nonsmooth target function (non-C1)

Smoothness monitoring does NOT interrupt optimization even if it suspects
that your problem is nonsmooth. It just sets the corresponding flags in the
OptGuard report which can be retrieved after optimization is over.

Smoothness monitoring is a moderate-overhead option which often adds less
than 1% to the optimizer running time. Thus, you can use it even for
large-scale problems.

NOTE: OptGuard does NOT guarantee that it will always detect C0/C1
      continuity violations.

      First, minor errors are hard to catch - say, a 0.0001 difference in
      the target values at the two sides of a gap may be due to a genuine
      discontinuity - or simply because the target has changed.

      Second, C1-violations are especially difficult to detect in a
      noninvasive way. The optimizer usually performs very short steps near
      the nonsmoothness, and differentiation usually introduces a lot of
      numerical noise. It is hard to tell whether some tiny discontinuity
      in the slope is due to real nonsmoothness or just due to numerical
      noise alone.

      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted with a restart from a different initial point).

INPUT PARAMETERS:
    state   -   algorithm state
    level   -   monitoring level:
                * 0 - monitoring is disabled
                * 1 - noninvasive low-overhead monitoring; function values
                      and/or gradients are recorded, but OptGuard does not
                      try to perform additional evaluations in order to get
                      more information about suspicious locations.

=== EXPLANATION ==========================================================

One major source of headache during optimization is the possibility of
coding errors in the target function/constraints (or their gradients). Such
errors most often manifest themselves as discontinuity or nonsmoothness of
the target/constraints.

Another frequent situation is when you try to optimize something involving
lots of min() and max() operations, i.e. a nonsmooth target. Although not a
coding error, it is nonsmoothness anyway - and smooth optimizers usually
stop right after encountering nonsmoothness, well before reaching the
solution.
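For example (an editorial illustration; the callback name is made up, and
<cmath> plus the alglib C++ wrappers are assumed), a target like
f = abs(x0) + 2*abs(x1) is non-C1 along the coordinate axes:

    void nonsmooth_target(const alglib::real_1d_array &x, double &func, void *ptr)
    {
        func = std::fabs(x[0]) + 2*std::fabs(x[1]);  // kinks along both axes => non-C1
    }

If the optimizer repeatedly steps across such a kink, the monitor may set
nonc1suspected in the OptGuard report.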
The OptGuard integrity checker helps you to catch such situations: it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points it raises an
appropriate flag (and allows you to continue optimization). When
optimization is done, you can study the OptGuard result.

  -- ALGLIB --
     Copyright 21.11.2018 by Bochkanov Sergey
*************************************************************************/
void minbcoptguardsmoothness(minbcstate* state,
     ae_int_t level,
     ae_state *_state)
{

    ae_assert(level==0||level==1, "MinBCOptGuardSmoothness: unexpected value of level parameter", _state);
    state->smoothnessguardlevel = level;
}


/*************************************************************************
Results of the OptGuard integrity check; this function should be called
after the optimization session is over.

=== PRIMARY REPORT =======================================================

OptGuard performs several checks which are intended to catch common errors
in the implementation of the nonlinear function/gradient:
* incorrect analytic gradient
* discontinuous (non-C0) target functions (constraints)
* nonsmooth (non-C1) target functions (constraints)

Each of these checks is activated with the appropriate function:
* minbcoptguardgradient() for gradient verification
* minbcoptguardsmoothness() for C0/C1 checks

The following flags are set when these errors are suspected:
* rep.badgradsuspected, and additionally:
  * rep.badgradvidx for the specific variable (gradient element) suspected
  * rep.badgradxbase, a point where the gradient is tested
  * rep.badgraduser, the user-provided gradient (stored as a 2D matrix with
    a single row in order to make the report structure compatible with more
    complex optimizers like MinNLC or MinLM)
  * rep.badgradnum, the reference gradient obtained via numerical
    differentiation (stored as a 2D matrix with a single row in order to
    make the report structure compatible with more complex optimizers like
    MinNLC or MinLM)
* rep.nonc0suspected
* rep.nonc1suspected

=== ADDITIONAL REPORTS/LOGS ==============================================

Several different tests are performed to catch C0/C1 errors; you can find
out which specific test signaled the error by looking at:
* rep.nonc0test0positive, for non-C0 test #0
* rep.nonc1test0positive, for non-C1 test #0
* rep.nonc1test1positive, for non-C1 test #1

Additional information (including line search logs) can be obtained by
means of:
* minbcoptguardnonc1test0results()
* minbcoptguardnonc1test1results()
which return detailed error reports, specific points where discontinuities
were found, and so on.

==========================================================================

INPUT PARAMETERS:
    state   -   algorithm state

OUTPUT PARAMETERS:
    rep     -   generic OptGuard report; more detailed reports can be
                retrieved with other functions.

NOTE: false negatives (nonsmooth problems are not identified as nonsmooth
      ones) are possible although unlikely.

      The reason is that you need to make several evaluations around the
      nonsmoothness in order to accumulate enough information about the
      function curvature. Say, if you start right from the nonsmooth point,
      the optimizer simply won't get enough data to understand what is
      going wrong before it terminates due to abrupt changes in the
      derivative. It is also possible that an "unlucky" step will move us
      to termination too quickly.

      Our current approach is to have less than 0.1% false negatives in our
      test examples (measured with multiple restarts from random points),
      and to have exactly 0% false positives.
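A retrieval sketch (an editorial illustration only; it assumes the alglib
C++ wrappers and the optguardreport fields listed above; 'state' is an
optimizer that has already finished a session with OptGuard enabled):

    alglib::optguardreport ogrep;
    alglib::minbcoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
        printf("suspicious gradient component: %d\n", (int)ogrep.badgradvidx);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
        printf("target may be discontinuous or nonsmooth\n");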
-- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardresults(minbcstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardnonc1test0results(minbcstate* state, optguardnonc1test0report* strrep, optguardnonc1test0report* lngrep, ae_state *_state) { _optguardnonc1test0report_clear(strrep); _optguardnonc1test0report_clear(lngrep); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. 
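For reference, a retrieval sketch (an editorial illustration only; it
assumes the alglib C++ wrappers; the report fields used here are described
below):

    alglib::optguardnonc1test1report strrep, lngrep;
    alglib::minbcoptguardnonc1test1results(state, strrep, lngrep);
    if( strrep.positive )
    {
        // variable strrep.vidx is suspected, with the violation located
        // between steps strrep.stpidxa and strrep.stpidxb of the logged line search
    }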
Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minbcoptguardnonc1test1results(minbcstate* state, optguardnonc1test1report* strrep, optguardnonc1test1report* lngrep, ae_state *_state) { _optguardnonc1test1report_clear(strrep); _optguardnonc1test1report_clear(lngrep); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* BC results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report. You should check Rep.TerminationType in order to distinguish successful termination from unsuccessful one: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 inconsistent constraints. * 1 relative function improvement is no more than EpsF. * 2 scaled step is no more than EpsX. * 4 scaled gradient norm is no more than EpsG. * 5 MaxIts steps was taken * 8 terminated by user who called minbcrequesttermination(). X contains point which was "current accepted" when termination request was submitted. More information about fields of this structure can be found in the comments on MinBCReport datatype. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbcresults(minbcstate* state, /* Real */ ae_vector* x, minbcreport* rep, ae_state *_state) { ae_vector_clear(x); _minbcreport_clear(rep); minbcresultsbuf(state, x, rep, _state); } /************************************************************************* BC results Buffered implementation of MinBCResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. 
It is intended to be used in the inner cycles of performance-critical
algorithms where the array reallocation penalty is too large to be ignored.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbcresultsbuf(minbcstate* state,
     /* Real    */ ae_vector* x,
     minbcreport* rep,
     ae_state *_state)
{
    ae_int_t i;

    if( x->cnt<state->nmain )
    {
        ae_vector_set_length(x, state->nmain, _state);
    }
    rep->iterationscount = state->repiterationscount;
    rep->nfev = state->repnfev;
    rep->varidx = state->repvaridx;
    rep->terminationtype = state->repterminationtype;
    if( state->repterminationtype>0 )
    {
        ae_v_move(&x->ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,state->nmain-1));
    }
    else
    {
        for(i=0; i<=state->nmain-1; i++)
        {
            x->ptr.p_double[i] = _state->v_nan;
        }
    }
}


/*************************************************************************
This subroutine restarts the algorithm from a new point. All optimization
parameters (including constraints) are left unchanged.

This function allows you to solve multiple optimization problems (which
must have the same number of dimensions) without object reallocation
penalty.

INPUT PARAMETERS:
    State   -   structure previously allocated with MinBCCreate call.
    X       -   new starting point.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbcrestartfrom(minbcstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state)
{
    ae_int_t n;

    n = state->nmain;

    /*
     * First, check for errors in the inputs
     */
    ae_assert(x->cnt>=n, "MinBCRestartFrom: Length(X)<N", _state);
    ae_assert(isfinitevector(x, n, _state), "MinBCRestartFrom: X contains infinite or NaN values", _state);

    /*
     * Set starting point
     */
    ae_v_move(&state->xstart.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1));

    /*
     * prepare RComm facilities
     */
    ae_vector_set_length(&state->rstate.ia, 6+1, _state);
    ae_vector_set_length(&state->rstate.ba, 1+1, _state);
    ae_vector_set_length(&state->rstate.ra, 7+1, _state);
    state->rstate.stage = -1;
    minbc_clearrequestfields(state, _state);
}


/*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the user decides
that it is time to "smoothly" terminate the optimization process. As a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns error code 8 (successful
termination).

INPUT PARAMETERS:
    State   -   optimizer structure

NOTE: after the request for termination the optimizer may perform several
      additional calls to user-supplied callbacks. It does NOT guarantee to
      stop immediately - it just guarantees that these additional calls
      will be discarded later.

NOTE: calling this function on an optimizer which is NOT running will have
      no effect.

NOTE: multiple calls to this function are possible. The first call is
      counted; subsequent calls are silently ignored.
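A usage sketch (an editorial illustration only; it assumes the alglib C++
wrappers, the callback body is a trivial example, and stop_requested() is a
made-up placeholder for an external stop condition):

    // gradient callback passed to minbcoptimize(); &state is passed as the
    // user pointer 'ptr' so that the callback can request termination
    void my_grad(const alglib::real_1d_array &x, double &func, alglib::real_1d_array &grad, void *ptr)
    {
        alglib::minbcstate *st = (alglib::minbcstate*)ptr;
        func = x[0]*x[0] + x[1]*x[1];
        grad[0] = 2*x[0];
        grad[1] = 2*x[1];
        if( stop_requested() )                       // hypothetical external condition
            alglib::minbcrequesttermination(*st);
    }

    // elsewhere: alglib::minbcoptimize(state, my_grad, NULL, &state);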
  -- ALGLIB --
     Copyright 08.10.2014 by Bochkanov Sergey
*************************************************************************/
void minbcrequesttermination(minbcstate* state, ae_state *_state)
{

    state->userterminationneeded = ae_true;
}


/*************************************************************************
Clears request fields (to be sure that we don't forget to clear something)
*************************************************************************/
static void minbc_clearrequestfields(minbcstate* state, ae_state *_state)
{

    state->needf = ae_false;
    state->needfg = ae_false;
    state->xupdated = ae_false;
}


/*************************************************************************
Internal initialization subroutine.
*************************************************************************/
static void minbc_minbcinitinternal(ae_int_t n,
     /* Real    */ ae_vector* x,
     double diffstep,
     minbcstate* state,
     ae_state *_state)
{
    ae_frame _frame_block;
    ae_int_t i;
    ae_matrix c;
    ae_vector ct;

    ae_frame_make(_state, &_frame_block);
    memset(&c, 0, sizeof(c));
    memset(&ct, 0, sizeof(ct));
    ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true);
    ae_vector_init(&ct, 0, DT_INT, _state, ae_true);

    /*
     * Initialize
     */
    state->teststep = (double)(0);
    state->smoothnessguardlevel = 0;
    smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state);
    state->nmain = n;
    state->diffstep = diffstep;
    rvectorsetlengthatleast(&state->bndl, n, _state);
    bvectorsetlengthatleast(&state->hasbndl, n, _state);
    rvectorsetlengthatleast(&state->bndu, n, _state);
    bvectorsetlengthatleast(&state->hasbndu, n, _state);
    rvectorsetlengthatleast(&state->xstart, n, _state);
    rvectorsetlengthatleast(&state->xc, n, _state);
    rvectorsetlengthatleast(&state->cgc, n, _state);
    rvectorsetlengthatleast(&state->ugc, n, _state);
    rvectorsetlengthatleast(&state->xn, n, _state);
    rvectorsetlengthatleast(&state->cgn, n, _state);
    rvectorsetlengthatleast(&state->ugn, n, _state);
    rvectorsetlengthatleast(&state->xp, n, _state);
    rvectorsetlengthatleast(&state->d, n, _state);
    rvectorsetlengthatleast(&state->s, n, _state);
    rvectorsetlengthatleast(&state->invs, n, _state);
    rvectorsetlengthatleast(&state->lastscaleused, n, _state);
    rvectorsetlengthatleast(&state->x, n, _state);
    rvectorsetlengthatleast(&state->g, n, _state);
    rvectorsetlengthatleast(&state->work, n, _state);
    for(i=0; i<=n-1; i++)
    {
        state->bndl.ptr.p_double[i] = _state->v_neginf;
        state->hasbndl.ptr.p_bool[i] = ae_false;
        state->bndu.ptr.p_double[i] = _state->v_posinf;
        state->hasbndu.ptr.p_bool[i] = ae_false;
        state->s.ptr.p_double[i] = 1.0;
        state->invs.ptr.p_double[i] = 1.0;
        state->lastscaleused.ptr.p_double[i] = 1.0;
    }
    minbcsetcond(state, 0.0, 0.0, 0.0, 0, _state);
    minbcsetxrep(state, ae_false, _state);
    minbcsetstpmax(state, 0.0, _state);
    minbcsetprecdefault(state, _state);
    minbcrestartfrom(state, x, _state);
    ae_frame_leave(_state);
}


/*************************************************************************
This subroutine updates the estimate of the good step length given:
1) the previous estimate
2) the new length of the good step

It makes sure that the estimate does not change too rapidly - the ratio of
the new and old estimates will be at least 0.01 and at most 100.0.

In case the previous estimate of the good step is zero (no estimate), the
new estimate is used unconditionally.
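Worked example (editorial illustration of the clamping below): with a
previous estimate of 1.0, a new step of 2.5 simply replaces the estimate;
a new step of 0.0001 is clamped to 0.01; a new step of 500 is clamped to
100.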
-- ALGLIB -- Copyright 16.01.2013 by Bochkanov Sergey *************************************************************************/ static void minbc_updateestimateofgoodstep(double* estimate, double newstep, ae_state *_state) { if( ae_fp_eq(*estimate,(double)(0)) ) { *estimate = newstep; return; } if( ae_fp_less(newstep,*estimate*0.01) ) { *estimate = *estimate*0.01; return; } if( ae_fp_greater(newstep,*estimate*100) ) { *estimate = *estimate*100; return; } *estimate = newstep; } void _minbcstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minbcstate *p = (minbcstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ugc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cgc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ugn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->cgn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xstart, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpprec, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic); _linminstate_init(&p->lstate, _state, make_automatic); ae_matrix_init(&p->bufyk, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->bufsk, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bufrho, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->buftheta, 0, DT_REAL, _state, make_automatic); _smoothnessmonitor_init(&p->smonitor, _state, make_automatic); ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invs, 0, DT_REAL, _state, make_automatic); } void _minbcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minbcstate *dst = (minbcstate*)_dst; minbcstate *src = (minbcstate*)_src; dst->nmain = src->nmain; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->xrep = src->xrep; dst->stpmax = src->stpmax; dst->diffstep = src->diffstep; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); dst->prectype = src->prectype; ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic); ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->needf = src->needf; dst->needfg = src->needfg; dst->xupdated = src->xupdated; dst->userterminationneeded = src->userterminationneeded; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); ae_vector_init_copy(&dst->ugc, &src->ugc, _state, make_automatic); ae_vector_init_copy(&dst->cgc, &src->cgc, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, 
_state, make_automatic); ae_vector_init_copy(&dst->ugn, &src->ugn, _state, make_automatic); ae_vector_init_copy(&dst->cgn, &src->cgn, _state, make_automatic); ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic); dst->fc = src->fc; dst->fn = src->fn; dst->fp = src->fp; ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->lastscaledgoodstep = src->lastscaledgoodstep; ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); dst->repiterationscount = src->repiterationscount; dst->repnfev = src->repnfev; dst->repvaridx = src->repvaridx; dst->repterminationtype = src->repterminationtype; ae_vector_init_copy(&dst->xstart, &src->xstart, _state, make_automatic); dst->fbase = src->fbase; dst->fm2 = src->fm2; dst->fm1 = src->fm1; dst->fp1 = src->fp1; dst->fp2 = src->fp2; dst->xm1 = src->xm1; dst->xp1 = src->xp1; dst->gm1 = src->gm1; dst->gp1 = src->gp1; ae_vector_init_copy(&dst->tmpprec, &src->tmpprec, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); dst->nfev = src->nfev; dst->mcstage = src->mcstage; dst->stp = src->stp; dst->curstpmax = src->curstpmax; ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic); _linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic); dst->trimthreshold = src->trimthreshold; dst->nonmonotoniccnt = src->nonmonotoniccnt; ae_matrix_init_copy(&dst->bufyk, &src->bufyk, _state, make_automatic); ae_matrix_init_copy(&dst->bufsk, &src->bufsk, _state, make_automatic); ae_vector_init_copy(&dst->bufrho, &src->bufrho, _state, make_automatic); ae_vector_init_copy(&dst->buftheta, &src->buftheta, _state, make_automatic); dst->bufsize = src->bufsize; dst->teststep = src->teststep; dst->smoothnessguardlevel = src->smoothnessguardlevel; _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic); ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic); ae_vector_init_copy(&dst->invs, &src->invs, _state, make_automatic); } void _minbcstate_clear(void* _p) { minbcstate *p = (minbcstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_vector_clear(&p->diagh); ae_vector_clear(&p->x); ae_vector_clear(&p->g); _rcommstate_clear(&p->rstate); ae_vector_clear(&p->xc); ae_vector_clear(&p->ugc); ae_vector_clear(&p->cgc); ae_vector_clear(&p->xn); ae_vector_clear(&p->ugn); ae_vector_clear(&p->cgn); ae_vector_clear(&p->xp); ae_vector_clear(&p->d); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->xstart); ae_vector_clear(&p->tmpprec); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->work); _linminstate_clear(&p->lstate); ae_matrix_clear(&p->bufyk); ae_matrix_clear(&p->bufsk); ae_vector_clear(&p->bufrho); ae_vector_clear(&p->buftheta); _smoothnessmonitor_clear(&p->smonitor); ae_vector_clear(&p->lastscaleused); ae_vector_clear(&p->invs); } void _minbcstate_destroy(void* _p) { minbcstate *p = (minbcstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_vector_destroy(&p->diagh); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); _rcommstate_destroy(&p->rstate); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->ugc); ae_vector_destroy(&p->cgc); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->ugn); ae_vector_destroy(&p->cgn); 
ae_vector_destroy(&p->xp); ae_vector_destroy(&p->d); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->xstart); ae_vector_destroy(&p->tmpprec); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->work); _linminstate_destroy(&p->lstate); ae_matrix_destroy(&p->bufyk); ae_matrix_destroy(&p->bufsk); ae_vector_destroy(&p->bufrho); ae_vector_destroy(&p->buftheta); _smoothnessmonitor_destroy(&p->smonitor); ae_vector_destroy(&p->lastscaleused); ae_vector_destroy(&p->invs); } void _minbcreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minbcreport *p = (minbcreport*)_p; ae_touch_ptr((void*)p); } void _minbcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minbcreport *dst = (minbcreport*)_dst; minbcreport *src = (minbcreport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->varidx = src->varidx; dst->terminationtype = src->terminationtype; } void _minbcreport_clear(void* _p) { minbcreport *p = (minbcreport*)_p; ae_touch_ptr((void*)p); } void _minbcreport_destroy(void* _p) { minbcreport *p = (minbcreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* NONSMOOTH NONCONVEX OPTIMIZATION SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS DESCRIPTION: The subroutine minimizes function F(x) of N arguments subject to any combination of: * bound constraints * linear inequality constraints * linear equality constraints * nonlinear equality constraints Gi(x)=0 * nonlinear inequality constraints Hi(x)<=0 IMPORTANT: see MinNSSetAlgoAGS for important information on performance restrictions of AGS solver. REQUIREMENTS: * starting point X0 must be feasible or not too far away from the feasible set * F(), G(), H() are continuous, locally Lipschitz and continuously (but not necessarily twice) differentiable in an open dense subset of R^N. Functions F(), G() and H() may be nonsmooth and non-convex. Informally speaking, it means that functions are composed of large differentiable "patches" with nonsmoothness having place only at the boundaries between these "patches". Most real-life nonsmooth functions satisfy these requirements. Say, anything which involves finite number of abs(), min() and max() is very likely to pass the test. Say, it is possible to optimize anything of the following: * f=abs(x0)+2*abs(x1) * f=max(x0,x1) * f=sin(max(x0,x1)+abs(x2)) * for nonlinearly constrained problems: F() must be bounded from below without nonlinear constraints (this requirement is due to the fact that, contrary to box and linear constraints, nonlinear ones require special handling). * user must provide function value and gradient for F(), H(), G() at all points where function/gradient can be calculated. If optimizer requires value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)), where gradient is not defined, user may resolve tie arbitrarily (in our case - return +1 or -1 at its discretion). * NS solver supports numerical differentiation, i.e. it may differentiate your function for you, but it results in 2N increase of function evaluations. Not recommended unless you solve really small problems. See minnscreatef() for more information on this functionality. USAGE: 1. User initializes algorithm state with MinNSCreate() call and chooses what NLC solver to use. 
There is some solver which is used by default, with default settings, but you should NOT rely on default choice. It may change in future releases of ALGLIB without notice, and no one can guarantee that new solver will be able to solve your problem with default settings. From the other side, if you choose solver explicitly, you can be pretty sure that it will work with new ALGLIB releases. In the current release following solvers can be used: * AGS solver (activated with MinNSSetAlgoAGS() function) 2. User adds boundary and/or linear and/or nonlinear constraints by means of calling one of the following functions: a) MinNSSetBC() for boundary constraints b) MinNSSetLC() for linear constraints c) MinNSSetNLC() for nonlinear constraints You may combine (a), (b) and (c) in one optimization problem. 3. User sets scale of the variables with MinNSSetScale() function. It is VERY important to set scale of the variables, because nonlinearly constrained problems are hard to solve when variables are badly scaled. 4. User sets stopping conditions with MinNSSetCond(). 5. Finally, user calls MinNSOptimize() function which takes algorithm state and pointer (delegate, etc) to callback function which calculates F/G/H. 7. User calls MinNSResults() to get solution 8. Optionally user may call MinNSRestartFrom() to solve another problem with same N but another starting point. MinNSRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. OUTPUT PARAMETERS: State - structure stores algorithm state NOTE: minnscreatef() function may be used if you do not have analytic gradient. This function creates solver which uses numerical differentiation with user-specified step. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnscreate(ae_int_t n, /* Real */ ae_vector* x, minnsstate* state, ae_state *_state) { _minnsstate_clear(state); ae_assert(n>=1, "MinNSCreate: N<1", _state); ae_assert(x->cnt>=n, "MinNSCreate: Length(X)0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - starting point, array[N]: * it is better to set X to a feasible point * but X can be infeasible, in which case algorithm will try to find feasible point first, using X as initial approximation. DiffStep- differentiation step, DiffStep>0. Algorithm performs numerical differentiation with step for I-th variable being equal to DiffStep*S[I] (here S[] is a scale vector, set by minnssetscale() function). Do not use too small steps, because it may lead to catastrophic cancellation during intermediate calculations. 
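EXAMPLE (illustrative sketch). The fragment below shows how a finite-difference
session is typically driven through the C++ wrapper layer. It is not part of
the generated sources: the target nsfunc_fvec(), the starting point and the
numeric constants are placeholders chosen for demonstration, and the exact
set of wrapper overloads may differ slightly between ALGLIB editions.

    #include <cmath>
    #include "optimization.h"

    // nonsmooth target f(x) = |x0| + 2*|x1|; only function values are supplied,
    // so the solver created with minnscreatef() differentiates it numerically
    void nsfunc_fvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = std::fabs(x[0]) + 2*std::fabs(x[1]);
    }

    int main()
    {
        alglib::real_1d_array x0 = "[1.0,1.0]";
        alglib::real_1d_array xsol;
        alglib::minnsstate s;
        alglib::minnsreport rep;
        alglib::minnscreatef(2, x0, 0.000001, s);    // DiffStep=1.0E-6 per unit of scale
        alglib::minnssetalgoags(s, 0.1, 0.0);        // AGS solver, zero nonlinear penalty
        alglib::minnssetcond(s, 0.00001, 0);         // stop when sampling radius < 1.0E-5
        alglib::minnsoptimize(s, nsfunc_fvec, NULL, NULL);
        alglib::minnsresults(s, xsol, rep);          // expect xsol near [0,0]
        return 0;
    }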
OUTPUT PARAMETERS: State - structure stores algorithm state -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnscreatef(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnsstate* state, ae_state *_state) { _minnsstate_clear(state); ae_assert(n>=1, "MinNSCreateF: N<1", _state); ae_assert(x->cnt>=n, "MinNSCreateF: Length(X)n; ae_assert(bndl->cnt>=n, "MinNSSetBC: Length(BndL)cnt>=n, "MinNSSetBC: Length(BndU)ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinNSSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinNSSetBC: BndL contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } } /************************************************************************* This function sets linear constraints. Linear constraints are inactive by default (after initial creation). They are preserved after algorithm restart with minnsrestartfrom(). INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT NOTE: linear (non-bound) constraints are satisfied only approximately: * there always exists some minor violation (about current sampling radius in magnitude during optimization, about EpsX in the solution) due to use of penalty method to handle constraints. * numerical differentiation, if used, may lead to function evaluations outside of the feasible area, because algorithm does NOT change numerical differentiation formula according to linear constraints. If you want constraints to be satisfied exactly, try to reformulate your problem in such manner that all constraints will become boundary ones (this kind of constraints is always satisfied exactly, both in the final solution and in all intermediate points). -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetlc(minnsstate* state, /* Real */ ae_matrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_int_t n; ae_int_t i; n = state->n; /* * First, check for errors in the inputs */ ae_assert(k>=0, "MinNSSetLC: K<0", _state); ae_assert(c->cols>=n+1||k==0, "MinNSSetLC: Cols(C)rows>=k, "MinNSSetLC: Rows(C)cnt>=k, "MinNSSetLC: Length(CT)nec = 0; state->nic = 0; return; } /* * Equality constraints are stored first, in the upper * NEC rows of State.CLEIC matrix. Inequality constraints * are stored in the next NIC rows. * * NOTE: we convert inequality constraints to the form * A*x<=b before copying them. 
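 *       For example (illustration only): a row C[i]=[1,2,5] with CT[i]=+1,
 *       i.e. the constraint x0+2*x1>=5, is stored with all N+1 elements
 *       negated, as -x0-2*x1<=-5, while rows with CT[i]<0 are copied as-is.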
*/ rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); state->nec = 0; state->nic = 0; for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]==0 ) { ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); state->nec = state->nec+1; } } for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]!=0 ) { if( ct->ptr.p_int[i]>0 ) { ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } else { ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } state->nic = state->nic+1; } } } /************************************************************************* This function sets nonlinear constraints. In fact, this function sets NUMBER of nonlinear constraints. Constraints itself (constraint functions) are passed to minnsoptimize() method. This method requires user-defined vector function F[] and its Jacobian J[], where: * first component of F[] and first row of Jacobian J[] correspond to function being minimized * next NLEC components of F[] (and rows of J) correspond to nonlinear equality constraints G_i(x)=0 * next NLIC components of F[] (and rows of J) correspond to nonlinear inequality constraints H_i(x)<=0 NOTE: you may combine nonlinear constraints with linear/boundary ones. If your problem has mixed constraints, you may explicitly specify some of them as linear ones. It may help optimizer to handle them more efficiently. INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. NLEC - number of Non-Linear Equality Constraints (NLEC), >=0 NLIC - number of Non-Linear Inquality Constraints (NLIC), >=0 NOTE 1: nonlinear constraints are satisfied only approximately! It is possible that algorithm will evaluate function outside of the feasible area! NOTE 2: algorithm scales variables according to scale specified by minnssetscale() function, so it can handle problems with badly scaled variables (as long as we KNOW their scales). However, there is no way to automatically scale nonlinear constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may ruin convergence. Solving problem with constraint "1000*G0(x)=0" is NOT same as solving it with constraint "0.001*G0(x)=0". It means that YOU are the one who is responsible for correct scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you to scale nonlinear constraints in such way that I-th component of dG/dX (or dH/dx) has approximately unit magnitude (for problems with unit scale) or has magnitude approximately equal to 1/S[i] (where S is a scale set by minnssetscale() function). NOTE 3: nonlinear constraints are always hard to handle, no matter what algorithm you try to use. Even basic box/linear constraints modify function curvature by adding valleys and ridges. However, nonlinear constraints add valleys which are very hard to follow due to their "curved" nature. It means that optimization with single nonlinear constraint may be significantly slower than optimization with multiple linear ones. It is normal situation, and we recommend you to carefully choose Rho parameter of minnssetalgoags(), because too large value may slow down convergence. 
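EXAMPLE OF THE F[]/J[] LAYOUT (illustrative sketch, not part of the generated
sources; the problem and the identifier nsfunc_jac are placeholders). For a
2-dimensional problem created with minnscreate() and configured with
minnssetnlc(State,1,1), i.e. one nonlinear equality and one nonlinear
inequality constraint, the callback passed to minnsoptimize() fills 1+1+1=3
components of F[] and three rows of the Jacobian:

    // target F(x)=x0+x1, equality G0(x)=x0^2+x1^2-1=0, inequality H0(x)=-x1<=0
    void nsfunc_jac(const alglib::real_1d_array &x,
                    alglib::real_1d_array &fi, alglib::real_2d_array &jac, void *ptr)
    {
        fi[0] = x[0]+x[1];                  // row 0: function being minimized
        jac[0][0] = 1.0;        jac[0][1] = 1.0;
        fi[1] = x[0]*x[0]+x[1]*x[1]-1.0;    // row 1: equality constraint G0(x)=0
        jac[1][0] = 2.0*x[0];   jac[1][1] = 2.0*x[1];
        fi[2] = -x[1];                      // row 2: inequality constraint H0(x)<=0
        jac[2][0] = 0.0;        jac[2][1] = -1.0;
    }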
-- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetnlc(minnsstate* state, ae_int_t nlec, ae_int_t nlic, ae_state *_state) { ae_assert(nlec>=0, "MinNSSetNLC: NLEC<0", _state); ae_assert(nlic>=0, "MinNSSetNLC: NLIC<0", _state); state->ng = nlec; state->nh = nlic; ae_vector_set_length(&state->fi, 1+state->ng+state->nh, _state); ae_matrix_set_length(&state->j, 1+state->ng+state->nh, state->n, _state); } /************************************************************************* This function sets stopping conditions for iterations of optimizer. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The AGS solver finishes its work if on k+1-th iteration sampling radius decreases below EpsX. MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection. We do not recommend you to rely on default choice in production code. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetcond(minnsstate* state, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsx, _state), "MinNSSetCond: EpsX is not finite number", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinNSSetCond: negative EpsX", _state); ae_assert(maxits>=0, "MinNSSetCond: negative MaxIts!", _state); if( ae_fp_eq(epsx,(double)(0))&&maxits==0 ) { epsx = 1.0E-6; } state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function sets scaling coefficients for NLC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetscale(minnsstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->n, "MinNSSetScale: Length(S)n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinNSSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinNSSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* This function tells MinNS unit to use AGS (adaptive gradient sampling) algorithm for nonsmooth constrained optimization. This algorithm is a slight modification of one described in "An Adaptive Gradient Sampling Algorithm for Nonsmooth Optimization" by Frank E. Curtisy and Xiaocun Quez. This optimizer has following benefits and drawbacks: + robustness; it can be used with nonsmooth and nonconvex functions. + relatively easy tuning; most of the metaparameters are easy to select. - it has convergence of steepest descent, slower than CG/LBFGS. 
- each iteration involves evaluation of ~2N gradient values and solution of 2Nx2N quadratic programming problem, which limits applicability of algorithm by small-scale problems (up to 50-100). IMPORTANT: this algorithm has convergence guarantees, i.e. it will steadily move towards some stationary point of the function. However, "stationary point" does not always mean "solution". Nonsmooth problems often have "flat spots", i.e. areas where function do not change at all. Such "flat spots" are stationary points by definition, and algorithm may be caught here. Nonsmooth CONVEX tasks are not prone to this problem. Say, if your function has form f()=MAX(f0,f1,...), and f_i are convex, then f() is convex too and you have guaranteed convergence to solution. INPUT PARAMETERS: State - structure which stores algorithm state Radius - initial sampling radius, >=0. Internally multiplied by vector of per-variable scales specified by minnssetscale()). You should select relatively large sampling radius, roughly proportional to scaled length of the first steps of the algorithm. Something close to 0.1 in magnitude should be good for most problems. AGS solver can automatically decrease radius, so too large radius is not a problem (assuming that you won't choose so large radius that algorithm will sample function in too far away points, where gradient value is irrelevant). Too small radius won't cause algorithm to fail, but it may slow down algorithm (it may have to perform too short steps). Penalty - penalty coefficient for nonlinear constraints: * for problem with nonlinear constraints should be some problem-specific positive value, large enough that penalty term changes shape of the function. Starting from some problem-specific value penalty coefficient becomes large enough to exactly enforce nonlinear constraints; larger values do not improve precision. Increasing it too much may slow down convergence, so you should choose it carefully. * can be zero for problems WITHOUT nonlinear constraints (i.e. for unconstrained ones or ones with just box or linear constraints) * if you specify zero value for problem with at least one nonlinear constraint, algorithm will terminate with error code -1. ALGORITHM OUTLINE The very basic outline of unconstrained AGS algorithm is given below: 0. If sampling radius is below EpsX or we performed more then MaxIts iterations - STOP. 1. sample O(N) gradient values at random locations around current point; informally speaking, this sample is an implicit piecewise linear model of the function, although algorithm formulation does not mention that explicitly 2. solve quadratic programming problem in order to find descent direction 3. if QP solver tells us that we are near solution, decrease sampling radius and move to (0) 4. perform backtracking line search 5. after moving to new point, goto (0) As for the constraints: * box constraints are handled exactly by modification of the function being minimized * linear/nonlinear constraints are handled by adding L1 penalty. Because our solver can handle nonsmoothness, we can use L1 penalty function, which is an exact one (i.e. exact solution is returned under such penalty). * penalty coefficient for linear constraints is chosen automatically; however, penalty coefficient for nonlinear constraints must be specified by user. 
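For intuition, the L1 merit function mentioned above has, up to the internal
scaling and normalization performed by the solver, the shape sketched below.
This is a simplified illustration, not the exact production formula:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Simplified L1 merit value at a single point:
    //   f     - target value F(x)
    //   geq   - values of nonlinear equality constraints G_i(x)   (want G_i(x)=0)
    //   hineq - values of nonlinear inequality constraints H_i(x) (want H_i(x)<=0)
    //   rho   - the Penalty parameter passed to minnssetalgoags()
    double l1merit(double f, const std::vector<double> &geq,
                   const std::vector<double> &hineq, double rho)
    {
        double m = f;
        for(double g : geq)
            m += rho*std::fabs(g);       // exact penalty, nonsmooth at G_i(x)=0
        for(double h : hineq)
            m += rho*std::max(h, 0.0);   // one-sided penalty, active only when violated
        return m;
    }

Because the penalty is an absolute-value (L1) term rather than a quadratic one,
there is, under standard regularity assumptions, a finite problem-specific Rho
above which minimizers of the merit function coincide with solutions of the
constrained problem; this is the sense in which the penalty is "exact".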
-- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnssetalgoags(minnsstate* state, double radius, double penalty, ae_state *_state) { ae_assert(ae_isfinite(radius, _state), "MinNSSetAlgoAGS: Radius is not finite", _state); ae_assert(ae_fp_greater(radius,(double)(0)), "MinNSSetAlgoAGS: Radius<=0", _state); ae_assert(ae_isfinite(penalty, _state), "MinNSSetAlgoAGS: Penalty is not finite", _state); ae_assert(ae_fp_greater_eq(penalty,0.0), "MinNSSetAlgoAGS: Penalty<0", _state); state->agsrhononlinear = penalty; state->agsradius = radius; state->solvertype = 0; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to minnsoptimize(). -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minnssetxrep(minnsstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As result, optimizer stops at point which was "current accepted" when termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsrequesttermination(minnsstate* state, ae_state *_state) { state->userterminationneeded = ae_true; } /************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied Jacobian, and one which uses only function vector and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object you should choose appropriate variant of minnsoptimize() - one which accepts function AND Jacobian or one which accepts ONLY function. Be careful to choose variant of minnsoptimize() which corresponds to your optimization scheme! Table below lists different combinations of callback (function/gradient) passed to minnsoptimize() and specific function used to create optimizer. | USER PASSED TO minnsoptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ minnscreatef() | works FAILS minnscreate() | FAILS works Here "FAILS" denotes inappropriate combinations of optimizer creation function and minnsoptimize() version. Attemps to use such combination will lead to exception. 
Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ ae_bool minnsiteration(minnsstate* state, ae_state *_state) { ae_int_t i; ae_int_t k; ae_int_t n; ae_int_t ng; ae_int_t nh; double v; double xp; double xm; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { i = state->rstate.ia.ptr.p_int[0]; k = state->rstate.ia.ptr.p_int[1]; n = state->rstate.ia.ptr.p_int[2]; ng = state->rstate.ia.ptr.p_int[3]; nh = state->rstate.ia.ptr.p_int[4]; v = state->rstate.ra.ptr.p_double[0]; xp = state->rstate.ra.ptr.p_double[1]; xm = state->rstate.ra.ptr.p_double[2]; } else { i = 359; k = -58; n = -919; ng = -909; nh = 81; v = 255; xp = 74; xm = -788; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } /* * Routine body */ /* * Init */ state->replcerr = 0.0; state->repnlcerr = 0.0; state->repterminationtype = 0; state->repinneriterationscount = 0; state->repouteriterationscount = 0; state->repnfev = 0; state->repvaridx = 0; state->repfuncidx = 0; state->userterminationneeded = ae_false; state->dbgncholesky = 0; n = state->n; ng = state->ng; nh = state->nh; minns_clearrequestfields(state, _state); /* * AGS solver */ if( state->solvertype!=0 ) { goto lbl_4; } if( ae_fp_neq(state->diffstep,(double)(0)) ) { rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->fm, 1+ng+nh, _state); rvectorsetlengthatleast(&state->fp, 1+ng+nh, _state); } ae_vector_set_length(&state->rstateags.ia, 13+1, _state); ae_vector_set_length(&state->rstateags.ba, 3+1, _state); ae_vector_set_length(&state->rstateags.ra, 9+1, _state); state->rstateags.stage = -1; lbl_6: if( !minns_agsiteration(state, _state) ) { goto lbl_7; } /* * Numerical differentiation (if needed) - intercept NeedFiJ * request and replace it by sequence of NeedFi requests */ if( !(ae_fp_neq(state->diffstep,(double)(0))&&state->needfij) ) { goto lbl_8; } state->needfij = ae_false; state->needfi = ae_true; ae_v_move(&state->xbase.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); k = 0; lbl_10: if( k>n-1 ) { goto lbl_12; } v = state->xbase.ptr.p_double[k]; xm = v-state->diffstep*state->s.ptr.p_double[k]; xp = v+state->diffstep*state->s.ptr.p_double[k]; if( state->hasbndl.ptr.p_bool[k]&&ae_fp_less(xm,state->bndl.ptr.p_double[k]) ) { xm = state->bndl.ptr.p_double[k]; } if( state->hasbndu.ptr.p_bool[k]&&ae_fp_greater(xp,state->bndu.ptr.p_double[k]) ) { xp = state->bndu.ptr.p_double[k]; } ae_assert(ae_fp_less_eq(xm,xp), "MinNS: integrity check failed", _state); if( ae_fp_eq(xm,xp) ) { goto lbl_13; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = xm; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: ae_v_move(&state->fm.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = xp; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: 
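    /*
     * The caller has just returned F/G/H evaluated at the shifted point Xp
     * (values at Xm were stored in Fm[] at lbl_0). The code below copies them
     * into Fp[] and forms the K-th column of the Jacobian for the target and
     * all nonlinear constraints as the central difference (Fp[i]-Fm[i])/(Xp-Xm).
     */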
ae_v_move(&state->fp.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_move(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fp.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); ae_v_sub(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fm.ptr.p_double[0], 1, ae_v_len(0,ng+nh)); v = 1/(xp-xm); ae_v_muld(&state->j.ptr.pp_double[0][k], state->j.stride, ae_v_len(0,ng+nh), v); state->repnfev = state->repnfev+2; goto lbl_14; lbl_13: for(i=0; i<=ng+nh; i++) { state->j.ptr.pp_double[i][k] = 0.0; } lbl_14: k = k+1; goto lbl_10; lbl_12: ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->rstate.stage = 2; goto lbl_rcomm; lbl_2: /* * Restore previous values of fields and continue */ state->needfi = ae_false; state->needfij = ae_true; goto lbl_6; lbl_8: /* * Forward request to caller */ state->rstate.stage = 3; goto lbl_rcomm; lbl_3: inc(&state->repnfev, _state); goto lbl_6; lbl_7: result = ae_false; return result; lbl_4: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = i; state->rstate.ia.ptr.p_int[1] = k; state->rstate.ia.ptr.p_int[2] = n; state->rstate.ia.ptr.p_int[3] = ng; state->rstate.ia.ptr.p_int[4] = nh; state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = xp; state->rstate.ra.ptr.p_double[2] = xm; return result; } /************************************************************************* MinNS results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report. You should check Rep.TerminationType in order to distinguish successful termination from unsuccessful one: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -3 box constraints are inconsistent * -1 inconsistent parameters were passed: * penalty parameter for minnssetalgoags() is zero, but we have nonlinear constraints set by minnssetnlc() * 2 sampling radius decreased below epsx * 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. * 8 User requested termination via minnsrequesttermination() -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsresults(minnsstate* state, /* Real */ ae_vector* x, minnsreport* rep, ae_state *_state) { ae_vector_clear(x); _minnsreport_clear(rep); minnsresultsbuf(state, x, rep, _state); } /************************************************************************* Buffered implementation of minnsresults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. 
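A typical reuse pattern at the C++ wrapper level looks roughly as follows
(illustrative fragment only: nsfunc_jac is a user callback like the one
sketched in the minnssetnlc() comments, and S is a previously created and
configured minnsstate):

    #include <vector>
    #include "optimization.h"

    void nsfunc_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
                    alglib::real_2d_array &jac, void *ptr);   // user-supplied target

    void solve_batch(alglib::minnsstate &s,
                     const std::vector<alglib::real_1d_array> &startingpoints)
    {
        alglib::real_1d_array xsol;     // grown on the first call, reused afterwards
        alglib::minnsreport rep;
        for(size_t p=0; p<startingpoints.size(); p++)
        {
            alglib::minnsrestartfrom(s, startingpoints[p]);  // same N, new starting point
            alglib::minnsoptimize(s, nsfunc_jac, NULL, NULL);
            alglib::minnsresultsbuf(s, xsol, rep);           // X resized only if too small
        }
    }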
-- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsresultsbuf(minnsstate* state, /* Real */ ae_vector* x, minnsreport* rep, ae_state *_state) { ae_int_t i; if( x->cnt<state->n ) { ae_vector_set_length(x, state->n, _state); } rep->iterationscount = state->repinneriterationscount; rep->nfev = state->repnfev; rep->varidx = state->repvaridx; rep->funcidx = state->repfuncidx; rep->terminationtype = state->repterminationtype; rep->cerr = ae_maxreal(state->replcerr, state->repnlcerr, _state); rep->lcerr = state->replcerr; rep->nlcerr = state->repnlcerr; if( state->repterminationtype>0 ) { ae_v_move(&x->ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); } else { for(i=0; i<=state->n-1; i++) { x->ptr.p_double[i] = _state->v_nan; } } } /************************************************************************* This subroutine restarts algorithm from new point. All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure previously allocated with minnscreate() call. X - new starting point. -- ALGLIB -- Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ void minnsrestartfrom(minnsstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t n; n = state->n; /* * First, check for errors in the inputs */ ae_assert(x->cnt>=n, "MinNSRestartFrom: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinNSRestartFrom: X contains infinite or NaN values!", _state); ae_v_move(&state->xstart.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * prepare RComm facilities */ ae_vector_set_length(&state->rstate.ia, 4+1, _state); ae_vector_set_length(&state->rstate.ra, 2+1, _state); state->rstate.stage = -1; minns_clearrequestfields(state, _state); } /************************************************************************* Clears request fields (to be sure that we don't forget to clear something) *************************************************************************/ static void minns_clearrequestfields(minnsstate* state, ae_state *_state) { state->needfi = ae_false; state->needfij = ae_false; state->xupdated = ae_false; } /************************************************************************* Internal initialization subroutine. Sets default NLC solver with default criteria.
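(For reference, the defaults installed below are: AGS solver with Radius=0.1
and nonlinear Penalty=1000, unit variable scales, no constraints of any kind,
automatically selected stopping criteria and no iteration reports.)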
*************************************************************************/ static void minns_minnsinitinternal(ae_int_t n, /* Real */ ae_vector* x, double diffstep, minnsstate* state, ae_state *_state) { ae_frame _frame_block; ae_int_t i; ae_matrix c; ae_vector ct; ae_frame_make(_state, &_frame_block); memset(&c, 0, sizeof(c)); memset(&ct, 0, sizeof(ct)); ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); ae_vector_init(&ct, 0, DT_INT, _state, ae_true); state->agsinitstp = 0.2; state->agsstattold = 1.0E-10; state->agsshortstpabs = 1.0E-10; state->agsshortstprel = 0.75; state->agsshortf = 10*ae_machineepsilon; state->agsrhononlinear = 0.0; state->agsraddecay = 0.2; state->agsalphadecay = 0.5; state->agsdecrease = 0.1; state->agsmaxraddecays = 50; state->agsmaxbacktrack = 20; state->agsmaxbacktracknonfull = 8; state->agspenaltylevel = 50.0; state->agspenaltyincrease = 100.0; state->agsminupdate = ae_maxint(5, n/2, _state); state->agssamplesize = ae_maxint(2*n+1, state->agsminupdate+1, _state); state->agsshortlimit = 4+state->agssamplesize/state->agsminupdate; /* * Initialize other params */ state->n = n; state->diffstep = diffstep; ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->hasbndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->hasbndu, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->xstart, n, _state); ae_vector_set_length(&state->xc, n, _state); ae_vector_set_length(&state->xn, n, _state); ae_vector_set_length(&state->d, n, _state); ae_vector_set_length(&state->x, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = _state->v_neginf; state->hasbndl.ptr.p_bool[i] = ae_false; state->bndu.ptr.p_double[i] = _state->v_posinf; state->hasbndu.ptr.p_bool[i] = ae_false; state->s.ptr.p_double[i] = 1.0; state->xstart.ptr.p_double[i] = x->ptr.p_double[i]; state->xc.ptr.p_double[i] = x->ptr.p_double[i]; } minnssetlc(state, &c, &ct, 0, _state); minnssetnlc(state, 0, 0, _state); minnssetcond(state, 0.0, 0, _state); minnssetxrep(state, ae_false, _state); minnssetalgoags(state, 0.1, 1000.0, _state); minnsrestartfrom(state, x, _state); ae_frame_leave(_state); } /************************************************************************* This function performs actual processing for AUL algorith. It expects that caller redirects its reverse communication requests NeedFiJ/XUpdated to external user who will provide analytic derivative (or handle reports about progress). In case external user does not have analytic derivative, it is responsibility of caller to intercept NeedFiJ request and replace it with appropriate numerical differentiation scheme. -- ALGLIB -- Copyright 06.06.2015 by Bochkanov Sergey *************************************************************************/ static ae_bool minns_agsiteration(minnsstate* state, ae_state *_state) { ae_int_t n; ae_int_t nec; ae_int_t nic; ae_int_t ng; ae_int_t nh; ae_int_t i; ae_int_t j; ae_int_t k; double radius0; double radius; ae_int_t radiusdecays; double alpha; double recommendedstep; double dnrm; double dg; double v; double vv; ae_int_t maxsamplesize; ae_int_t cursamplesize; double v0; double v1; ae_bool restartneeded; ae_bool b; ae_bool alphadecreased; ae_int_t shortstepscnt; ae_int_t backtrackits; ae_int_t maxbacktrackits; ae_bool fullsample; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstateags.stage>=0 ) { n = state->rstateags.ia.ptr.p_int[0]; nec = state->rstateags.ia.ptr.p_int[1]; nic = state->rstateags.ia.ptr.p_int[2]; ng = state->rstateags.ia.ptr.p_int[3]; nh = state->rstateags.ia.ptr.p_int[4]; i = state->rstateags.ia.ptr.p_int[5]; j = state->rstateags.ia.ptr.p_int[6]; k = state->rstateags.ia.ptr.p_int[7]; radiusdecays = state->rstateags.ia.ptr.p_int[8]; maxsamplesize = state->rstateags.ia.ptr.p_int[9]; cursamplesize = state->rstateags.ia.ptr.p_int[10]; shortstepscnt = state->rstateags.ia.ptr.p_int[11]; backtrackits = state->rstateags.ia.ptr.p_int[12]; maxbacktrackits = state->rstateags.ia.ptr.p_int[13]; restartneeded = state->rstateags.ba.ptr.p_bool[0]; b = state->rstateags.ba.ptr.p_bool[1]; alphadecreased = state->rstateags.ba.ptr.p_bool[2]; fullsample = state->rstateags.ba.ptr.p_bool[3]; radius0 = state->rstateags.ra.ptr.p_double[0]; radius = state->rstateags.ra.ptr.p_double[1]; alpha = state->rstateags.ra.ptr.p_double[2]; recommendedstep = state->rstateags.ra.ptr.p_double[3]; dnrm = state->rstateags.ra.ptr.p_double[4]; dg = state->rstateags.ra.ptr.p_double[5]; v = state->rstateags.ra.ptr.p_double[6]; vv = state->rstateags.ra.ptr.p_double[7]; v0 = state->rstateags.ra.ptr.p_double[8]; v1 = state->rstateags.ra.ptr.p_double[9]; } else { n = 809; nec = 205; nic = -838; ng = 939; nh = -526; i = 763; j = -541; k = -698; radiusdecays = -900; maxsamplesize = -318; cursamplesize = -940; shortstepscnt = 1016; backtrackits = -229; maxbacktrackits = -536; restartneeded = ae_true; b = ae_true; alphadecreased = ae_false; fullsample = ae_false; radius0 = -722; radius = -413; alpha = -461; recommendedstep = 927; dnrm = 201; dg = 922; v = -154; vv = 306; v0 = -1011; v1 = 951; } if( state->rstateags.stage==0 ) { goto lbl_0; } if( state->rstateags.stage==1 ) { goto lbl_1; } if( state->rstateags.stage==2 ) { goto lbl_2; } if( state->rstateags.stage==3 ) { goto lbl_3; } /* * Routine body */ ae_assert(state->solvertype==0, "MinNS: internal error", _state); n = state->n; nec = state->nec; nic = state->nic; ng = state->ng; nh = state->nh; /* * Check consistency of parameters */ if( ng+nh>0&&ae_fp_eq(state->agsrhononlinear,(double)(0)) ) { state->repterminationtype = -1; result = ae_false; return result; } /* * Allocate arrays. 
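 *
 * NOTE: the sample matrices below are allocated with MaxSampleSize+1 rows;
 *       the extra last row is reserved for the trial point XN evaluated
 *       during the backtracking line search (it is passed to the merit
 *       function under index MaxSampleSize).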
*/ rvectorsetlengthatleast(&state->colmax, n, _state); rvectorsetlengthatleast(&state->diagh, n, _state); rvectorsetlengthatleast(&state->signmin, n, _state); rvectorsetlengthatleast(&state->signmax, n, _state); maxsamplesize = state->agssamplesize; rmatrixsetlengthatleast(&state->samplex, maxsamplesize+1, n, _state); rmatrixsetlengthatleast(&state->samplegm, maxsamplesize+1, n, _state); rmatrixsetlengthatleast(&state->samplegmbc, maxsamplesize+1, n, _state); rvectorsetlengthatleast(&state->samplef, maxsamplesize+1, _state); rvectorsetlengthatleast(&state->samplef0, maxsamplesize+1, _state); rvectorsetlengthatleast(&state->grs, n, _state); /* * Prepare optimizer */ rvectorsetlengthatleast(&state->tmp0, maxsamplesize, _state); rvectorsetlengthatleast(&state->tmp1, maxsamplesize, _state); ivectorsetlengthatleast(&state->tmp3, 1, _state); rmatrixsetlengthatleast(&state->tmp2, 1, maxsamplesize+1, _state); for(i=0; i<=maxsamplesize-1; i++) { state->tmp0.ptr.p_double[i] = 0.0; state->tmp1.ptr.p_double[i] = _state->v_posinf; } /* * Prepare RNG, seed it with fixed values so * that each run on same problem yeilds same results */ hqrndseed(7235, 98532, &state->agsrs, _state); /* * Prepare initial point subject to current bound constraints and * perform scaling of bound constraints, linear constraints, point itself */ rvectorsetlengthatleast(&state->scaledbndl, n, _state); rvectorsetlengthatleast(&state->scaledbndu, n, _state); for(i=0; i<=n-1; i++) { /* * Check and scale constraints */ if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_less(state->bndu.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->repterminationtype = -3; result = ae_false; return result; } if( state->hasbndl.ptr.p_bool[i] ) { state->scaledbndl.ptr.p_double[i] = state->bndl.ptr.p_double[i]/state->s.ptr.p_double[i]; } else { state->scaledbndl.ptr.p_double[i] = _state->v_neginf; } if( state->hasbndu.ptr.p_bool[i] ) { state->scaledbndu.ptr.p_double[i] = state->bndu.ptr.p_double[i]/state->s.ptr.p_double[i]; } else { state->scaledbndu.ptr.p_double[i] = _state->v_posinf; } if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) { ae_assert(ae_fp_less_eq(state->scaledbndl.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]), "MinNS: integrity check failed", _state); } if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { ae_assert(ae_fp_eq(state->scaledbndl.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]), "MinNS: integrity check failed", _state); } /* * Scale and constrain point */ state->xc.ptr.p_double[i] = state->xstart.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->scaledbndl.ptr.p_double[i]; continue; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->scaledbndu.ptr.p_double[i]; continue; } state->xc.ptr.p_double[i] = state->xc.ptr.p_double[i]/state->s.ptr.p_double[i]; if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xc.ptr.p_double[i],state->scaledbndl.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->scaledbndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xc.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]) ) { state->xc.ptr.p_double[i] = state->scaledbndu.ptr.p_double[i]; } } rmatrixsetlengthatleast(&state->scaledcleic, nec+nic, n+1, _state); 
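    /*
     * Scale linear constraints by S[] and normalize each row so that its
     * coefficient part has unit norm. Per-row penalty coefficients
     * RhoLinear[] start at zero and are increased adaptively whenever the
     * directional derivative of the target along a violated constraint
     * becomes large relative to the current penalty.
     */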
rvectorsetlengthatleast(&state->rholinear, nec+nic, _state); for(i=0; i<=nec+nic-1; i++) { /* * Initial value of penalty coefficient is zero */ state->rholinear.ptr.p_double[i] = 0.0; /* * Scale and normalize linear constraints */ vv = 0.0; for(j=0; j<=n-1; j++) { v = state->cleic.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; state->scaledcleic.ptr.pp_double[i][j] = v; vv = vv+v*v; } vv = ae_sqrt(vv, _state); state->scaledcleic.ptr.pp_double[i][n] = state->cleic.ptr.pp_double[i][n]; if( ae_fp_greater(vv,(double)(0)) ) { for(j=0; j<=n; j++) { state->scaledcleic.ptr.pp_double[i][j] = state->scaledcleic.ptr.pp_double[i][j]/vv; } } } /* * Main cycle * * We maintain several variables during iteration: * * RecommendedStep- current estimate of recommended step length; * must be Radius0 on first entry * * Radius - current sampling radius * * CurSampleSize - current sample size (may change in future versions) * * FullSample - whether we have full sample, or only partial one * * RadiusDecays - total number of decreases performed for sampling radius */ radius = state->agsradius; radius0 = radius; recommendedstep = ae_minreal(radius0, state->agsinitstp, _state); cursamplesize = 1; radiusdecays = 0; shortstepscnt = 0; fullsample = ae_false; lbl_4: if( ae_false ) { goto lbl_5; } /* * First phase of iteration - central point: * * 1. evaluate function at central point - first entry in sample. * Its status is ignored, it is always recalculated. * 2. report point and check gradient/function value for NAN/INF * 3. check penalty coefficients for linear terms; increase them * if directional derivative of function being optimized (not * merit function!) is larger than derivative of penalty. * 4. update report on constraint violation */ cursamplesize = ae_maxint(cursamplesize, 1, _state); ae_v_move(&state->samplex.ptr.pp_double[0][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); minns_unscalepointbc(state, &state->x, _state); minns_clearrequestfields(state, _state); state->needfij = ae_true; state->rstateags.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; state->replcerr = 0.0; for(i=0; i<=nec+nic-1; i++) { v = -state->scaledcleic.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = v+state->scaledcleic.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; } if( i>=nec&&ae_fp_less_eq(v,(double)(0)) ) { continue; } state->replcerr = ae_maxreal(state->replcerr, ae_fabs(v, _state), _state); } state->repnlcerr = 0.0; for(i=1; i<=ng+nh; i++) { v = state->fi.ptr.p_double[i]; if( i>ng&&ae_fp_less_eq(v,(double)(0)) ) { continue; } state->repnlcerr = ae_maxreal(state->repnlcerr, ae_fabs(v, _state), _state); } for(j=0; j<=n-1; j++) { state->grs.ptr.p_double[j] = state->j.ptr.pp_double[0][j]*state->s.ptr.p_double[j]; } minns_generatemeritfunction(state, 0, _state); if( !state->xrep ) { goto lbl_6; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->samplef0.ptr.p_double[0]; minns_unscalepointbc(state, &state->x, _state); minns_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstateags.stage = 1; goto lbl_rcomm; lbl_1: state->xupdated = ae_false; lbl_6: if( state->userterminationneeded ) { /* * User requested termination */ state->repterminationtype = 8; goto lbl_5; } v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->samplegm.ptr.pp_double[0][i], _state); } if( !ae_isfinite(v, _state)||!ae_isfinite(state->samplef.ptr.p_double[0], _state) ) { 
/* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; goto lbl_5; } restartneeded = ae_false; for(i=0; i<=nec+nic-1; i++) { /* * Evaluate penalty function. * * Skip update if penalty is satisfied exactly (this check * also covers situations when I-th row is exactly zero). */ v = -state->scaledcleic.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = v+state->scaledcleic.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; } if( i=nec&&ae_fp_less_eq(v,(double)(0)) ) { continue; } /* * Calculate directional derivative, compare it with threshold. * * NOTE: we rely on the fact that ScaledCLEIC is normalized */ ae_assert(ae_fp_greater(state->agspenaltylevel,1.0), "MinNS: integrity error", _state); ae_assert(ae_fp_greater(state->agspenaltyincrease,state->agspenaltylevel), "MinNS: integrity error", _state); v = 0.0; for(j=0; j<=n-1; j++) { v = v+state->grs.ptr.p_double[j]*state->scaledcleic.ptr.pp_double[i][j]; } v = ae_fabs(v, _state); if( ae_fp_greater(v*state->agspenaltylevel,state->rholinear.ptr.p_double[i]) ) { state->rholinear.ptr.p_double[i] = v*state->agspenaltyincrease; restartneeded = ae_true; } } if( restartneeded ) { cursamplesize = 0; goto lbl_4; } /* * Check stopping conditions. */ if( radiusdecays>=state->agsmaxraddecays ) { /* * Too many attempts to decrease radius */ state->repterminationtype = 7; goto lbl_5; } if( state->repinneriterationscount>=state->maxits&&state->maxits>0 ) { /* * Too many iterations */ state->repterminationtype = 5; goto lbl_5; } if( ae_fp_less_eq(radius,state->epsx*state->agsraddecay) ) { /* * Radius is smaller than required step tolerance multiplied by radius decay. * * Additional decay is required in order to make sure that optimization session * with radius equal to EpsX was successfully done. */ state->repterminationtype = 2; goto lbl_5; } /* * Update sample: * * 1. invalidate entries which are too far away from XC * and move all valid entries to beginning of the sample. * 2. add new entries until we have AGSSampleSize * items in our sample. We remove oldest entries from * sample until we have enough place to add at least * AGSMinUpdate items. * 3. prepare "modified" gradient sample with respect to * boundary constraints. */ ae_assert(cursamplesize>=1, "MinNS: integrity check failed", _state); k = 1; for(i=1; i<=cursamplesize-1; i++) { /* * If entry is outside of Radius-ball around XC, discard it. */ v = 0.0; for(j=0; j<=n-1; j++) { v = ae_maxreal(v, ae_fabs(state->samplex.ptr.pp_double[i][j]-state->xc.ptr.p_double[j], _state), _state); } if( ae_fp_greater(v,radius) ) { continue; } /* * If central point is exactly at boundary, and corresponding * component of entry is OUT of boundary, entry is discarded. 
*/ b = ae_false; for(j=0; j<=n-1; j++) { b = b||((state->hasbndl.ptr.p_bool[j]&&ae_fp_eq(state->xc.ptr.p_double[j],state->scaledbndl.ptr.p_double[j]))&&ae_fp_neq(state->samplex.ptr.pp_double[i][j],state->scaledbndl.ptr.p_double[j])); b = b||((state->hasbndu.ptr.p_bool[j]&&ae_fp_eq(state->xc.ptr.p_double[j],state->scaledbndu.ptr.p_double[j]))&&ae_fp_neq(state->samplex.ptr.pp_double[i][j],state->scaledbndu.ptr.p_double[j])); } if( b ) { continue; } /* * Move to the beginning */ ae_v_move(&state->samplex.ptr.pp_double[k][0], 1, &state->samplex.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); ae_v_move(&state->samplegm.ptr.pp_double[k][0], 1, &state->samplegm.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); state->samplef.ptr.p_double[k] = state->samplef.ptr.p_double[i]; state->samplef0.ptr.p_double[k] = state->samplef0.ptr.p_double[i]; k = k+1; } cursamplesize = k; if( state->agssamplesize-cursamplesizeagsminupdate ) { /* * Remove oldest entries */ k = state->agsminupdate-(state->agssamplesize-cursamplesize); ae_assert(k<=cursamplesize-1, "MinNS: integrity check failed", _state); for(i=1; i<=cursamplesize-k-1; i++) { ae_v_move(&state->samplex.ptr.pp_double[i][0], 1, &state->samplex.ptr.pp_double[i+k][0], 1, ae_v_len(0,n-1)); ae_v_move(&state->samplegm.ptr.pp_double[i][0], 1, &state->samplegm.ptr.pp_double[i+k][0], 1, ae_v_len(0,n-1)); state->samplef.ptr.p_double[i] = state->samplef.ptr.p_double[i+k]; state->samplef0.ptr.p_double[i] = state->samplef0.ptr.p_double[i+k]; } cursamplesize = cursamplesize-k; } k = 0; i = cursamplesize; lbl_8: if( i>ae_minint(cursamplesize+state->agsminupdate, state->agssamplesize, _state)-1 ) { goto lbl_10; } for(j=0; j<=n-1; j++) { /* * Undistorted position */ state->samplex.ptr.pp_double[i][j] = state->xc.ptr.p_double[j]; /* * Do not apply distortion, if we are exactly at boundary constraint. 
*/ if( (state->hasbndl.ptr.p_bool[j]&&state->hasbndu.ptr.p_bool[j])&&ae_fp_eq(state->scaledbndl.ptr.p_double[j],state->scaledbndu.ptr.p_double[j]) ) { continue; } if( state->hasbndl.ptr.p_bool[j]&&ae_fp_eq(state->samplex.ptr.pp_double[i][j],state->scaledbndl.ptr.p_double[j]) ) { continue; } if( state->hasbndu.ptr.p_bool[j]&&ae_fp_eq(state->samplex.ptr.pp_double[i][j],state->scaledbndu.ptr.p_double[j]) ) { continue; } /* * Apply distortion */ if( ae_fp_greater_eq(hqrnduniformr(&state->agsrs, _state),0.5) ) { /* * Sample at the left side with 50% probability */ v0 = state->samplex.ptr.pp_double[i][j]-radius; v1 = state->samplex.ptr.pp_double[i][j]; if( state->hasbndl.ptr.p_bool[j] ) { v0 = ae_maxreal(state->scaledbndl.ptr.p_double[j], v0, _state); } } else { /* * Sample at the right side with 50% probability */ v0 = state->samplex.ptr.pp_double[i][j]; v1 = state->samplex.ptr.pp_double[i][j]+radius; if( state->hasbndu.ptr.p_bool[j] ) { v1 = ae_minreal(state->scaledbndu.ptr.p_double[j], v1, _state); } } ae_assert(ae_fp_greater_eq(v1,v0), "MinNS: integrity check failed", _state); state->samplex.ptr.pp_double[i][j] = boundval(v0+(v1-v0)*hqrnduniformr(&state->agsrs, _state), v0, v1, _state); } ae_v_move(&state->x.ptr.p_double[0], 1, &state->samplex.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); minns_unscalepointbc(state, &state->x, _state); minns_clearrequestfields(state, _state); state->needfij = ae_true; state->rstateags.stage = 2; goto lbl_rcomm; lbl_2: state->needfij = ae_false; minns_generatemeritfunction(state, i, _state); k = k+1; i = i+1; goto lbl_8; lbl_10: cursamplesize = cursamplesize+k; fullsample = cursamplesize==state->agssamplesize; for(j=0; j<=cursamplesize-1; j++) { /* * For J-th element in gradient sample, process all of its components * and modify them according to status of box constraints */ for(i=0; i<=n-1; i++) { ae_assert(!state->hasbndl.ptr.p_bool[i]||ae_fp_greater_eq(state->xc.ptr.p_double[i],state->scaledbndl.ptr.p_double[i]), "MinNS: integrity error", _state); ae_assert(!state->hasbndu.ptr.p_bool[i]||ae_fp_less_eq(state->xc.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]), "MinNS: integrity error", _state); state->samplegmbc.ptr.pp_double[j][i] = state->samplegm.ptr.pp_double[j][i]; if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->scaledbndl.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]) ) { /* * I-th box constraint is of equality type (lower bound matches upper one). * Simplest case, always active. */ state->samplegmbc.ptr.pp_double[j][i] = 0.0; continue; } if( state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->scaledbndl.ptr.p_double[i]) ) { /* * We are at lower bound. * * A bit more complex: * * first, we have to activate/deactivate constraint depending on gradient at XC * * second, in any case, I-th column of gradient sample must be non-positive */ if( ae_fp_greater_eq(state->samplegm.ptr.pp_double[0][i],0.0) ) { state->samplegmbc.ptr.pp_double[j][i] = 0.0; } state->samplegmbc.ptr.pp_double[j][i] = ae_minreal(state->samplegmbc.ptr.pp_double[j][i], 0.0, _state); continue; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->scaledbndu.ptr.p_double[i]) ) { /* * We are at upper bound. 
* * A bit more complex: * * first, we have to activate/deactivate constraint depending on gradient at XC * * second, in any case, I-th column of gradient sample must be non-negative */ if( ae_fp_less_eq(state->samplegm.ptr.pp_double[0][i],0.0) ) { state->samplegmbc.ptr.pp_double[j][i] = 0.0; } state->samplegmbc.ptr.pp_double[j][i] = ae_maxreal(state->samplegmbc.ptr.pp_double[j][i], 0.0, _state); continue; } } } /* * Calculate diagonal Hessian. * * This Hessian serves two purposes: * * first, it improves performance of gradient descent step * * second, it improves condition number of QP subproblem * solved to determine step * * The idea is that for each variable we check whether sample * includes entries with alternating sign of gradient: * * if gradients with different signs are present, Hessian * component is set to M/R, where M is a maximum magnitude * of corresponding gradient component, R is a sampling radius. * Note that sign=0 and sign=1 are treated as different ones * * if all gradients have same sign, Hessian component is * set to M/R0, where R0 is initial sampling radius. */ for(j=0; j<=n-1; j++) { state->colmax.ptr.p_double[j] = 0.0; state->signmin.ptr.p_double[j] = (double)(1); state->signmax.ptr.p_double[j] = (double)(-1); } for(i=0; i<=cursamplesize-1; i++) { for(j=0; j<=n-1; j++) { v = state->samplegmbc.ptr.pp_double[i][j]; state->colmax.ptr.p_double[j] = ae_maxreal(state->colmax.ptr.p_double[j], ae_fabs(v, _state), _state); state->signmin.ptr.p_double[j] = ae_minreal(state->signmin.ptr.p_double[j], (double)(ae_sign(v, _state)), _state); state->signmax.ptr.p_double[j] = ae_maxreal(state->signmax.ptr.p_double[j], (double)(ae_sign(v, _state)), _state); } } for(j=0; j<=n-1; j++) { if( ae_fp_neq(state->signmin.ptr.p_double[j],state->signmax.ptr.p_double[j]) ) { /* * Alternating signs of gradient - step is proportional to current sampling radius */ ae_assert(ae_fp_neq(state->colmax.ptr.p_double[j],(double)(0)), "MinNS: integrity check failed", _state); ae_assert(ae_fp_neq(radius,(double)(0)), "MinNS: integrity check failed", _state); state->diagh.ptr.p_double[j] = state->colmax.ptr.p_double[j]/radius; continue; } if( ae_fp_neq(state->colmax.ptr.p_double[j],(double)(0)) ) { /* * Non-alternating sign of gradient, but non-zero. * Step is proportional to initial sampling radius */ ae_assert(ae_fp_neq(radius0,(double)(0)), "MinNS: integrity check failed", _state); state->diagh.ptr.p_double[j] = state->colmax.ptr.p_double[j]/radius0; continue; } state->diagh.ptr.p_double[j] = (double)(1); } /* * PROJECTION PHASE * * We project zero vector on convex hull of gradient sample. * If projection is small enough, we decrease radius and restart. * Otherwise, this phase returns search direction in State.D. * * NOTE: because we use iterative solver, it may have trouble * dealing with ill-conditioned problems. So we also employ * second, backup test for stationarity - when too many * subsequent backtracking searches resulted in short steps. 
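 *
 * In other words, the code below finds convex combination coefficients C
 * (C[i]>=0, SUM(C[i])=1) which minimize the norm of SUM(C[i]*G[i]) in the
 * metric induced by the inverse of the diagonal Hessian, G[i] being rows
 * of SampleGMBC, and then uses
 *     D = -inv(DiagH)*SUM(C[i]*G[i])
 * as the search direction.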
*/ minns_solveqp(&state->samplegmbc, &state->diagh, cursamplesize, n, &state->tmp0, &state->dbgncholesky, &state->nsqp, _state); for(j=0; j<=n-1; j++) { state->d.ptr.p_double[j] = 0.0; } for(i=0; i<=cursamplesize-1; i++) { v = state->tmp0.ptr.p_double[i]; ae_v_addd(&state->d.ptr.p_double[0], 1, &state->samplegmbc.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } v = 0.0; for(j=0; j<=n-1; j++) { v = ae_maxreal(v, ae_fabs(state->d.ptr.p_double[j]/coalesce(state->colmax.ptr.p_double[j], 1.0, _state), _state), _state); } if( ae_fp_less_eq(v,state->agsstattold) ) { /* * Stationarity test succeded. * Decrease radius and restart. * * NOTE: we also clear ShortStepsCnt on restart */ radius = radius*state->agsraddecay; shortstepscnt = 0; inc(&radiusdecays, _state); inc(&state->repinneriterationscount, _state); goto lbl_4; } for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = -state->d.ptr.p_double[i]/state->diagh.ptr.p_double[i]; } /* * Perform backtracking line search. * Update initial step length depending on search results. * Here we assume that D is non-zero. * * NOTE: if AGSShortLimit subsequent line searches resulted * in steps shorter than AGSStatTolStp, we decrease radius. */ dnrm = 0.0; dg = 0.0; for(i=0; i<=n-1; i++) { dnrm = dnrm+ae_sqr(state->d.ptr.p_double[i], _state); dg = dg+state->d.ptr.p_double[i]*state->samplegmbc.ptr.pp_double[0][i]; } dnrm = ae_sqrt(dnrm, _state); ae_assert(ae_fp_greater(dnrm,(double)(0)), "MinNS: integrity error", _state); alpha = recommendedstep/dnrm; alphadecreased = ae_false; backtrackits = 0; if( fullsample ) { maxbacktrackits = state->agsmaxbacktrack; } else { maxbacktrackits = state->agsmaxbacktracknonfull; } lbl_11: if( ae_false ) { goto lbl_12; } /* * Prepare XN and evaluate merit function at XN */ ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_addd(&state->xn.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), alpha); enforceboundaryconstraints(&state->xn, &state->scaledbndl, &state->hasbndl, &state->scaledbndu, &state->hasbndu, n, 0, _state); ae_v_move(&state->samplex.ptr.pp_double[maxsamplesize][0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); minns_unscalepointbc(state, &state->x, _state); minns_clearrequestfields(state, _state); state->needfij = ae_true; state->rstateags.stage = 3; goto lbl_rcomm; lbl_3: state->needfij = ae_false; minns_generatemeritfunction(state, maxsamplesize, _state); /* * Check sufficient decrease condition */ ae_assert(ae_fp_greater(dnrm,(double)(0)), "MinNS: integrity error", _state); if( ae_fp_less_eq(state->samplef.ptr.p_double[maxsamplesize],state->samplef.ptr.p_double[0]+alpha*state->agsdecrease*dg) ) { goto lbl_12; } /* * Decrease Alpha */ alpha = alpha*state->agsalphadecay; alphadecreased = ae_true; /* * Update and check iterations counter. */ inc(&backtrackits, _state); if( backtrackits>=maxbacktrackits ) { /* * Too many backtracking searches performed without success. * Terminate iterations. 
*/ alpha = 0.0; alphadecreased = ae_true; ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); goto lbl_12; } goto lbl_11; lbl_12: if( (ae_fp_less_eq(alpha*dnrm,state->agsshortstpabs)||ae_fp_less_eq(alpha*dnrm,state->agsshortstprel*radius))||ae_fp_less_eq(ae_fabs(state->samplef.ptr.p_double[0]-state->samplef.ptr.p_double[maxsamplesize], _state),state->agsshortf) ) { inc(&shortstepscnt, _state); } else { shortstepscnt = 0; } if( shortstepscnt>=state->agsshortlimit ) { /* * Too many subsequent short steps. * * It may be possible that optimizer is unable to find out * that we have to decrease radius because of ill-conditioned * gradients. * * Decrease radius and restart. */ radius = radius*state->agsraddecay; shortstepscnt = 0; inc(&radiusdecays, _state); inc(&state->repinneriterationscount, _state); goto lbl_4; } if( !alphadecreased ) { recommendedstep = recommendedstep*2.0; } if( alphadecreased&&fullsample ) { recommendedstep = recommendedstep*0.5; } /* * Next iteration */ ae_v_move(&state->xc.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); inc(&state->repinneriterationscount, _state); goto lbl_4; lbl_5: /* * Convert back from scaled to unscaled representation */ minns_unscalepointbc(state, &state->xc, _state); result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstateags.ia.ptr.p_int[0] = n; state->rstateags.ia.ptr.p_int[1] = nec; state->rstateags.ia.ptr.p_int[2] = nic; state->rstateags.ia.ptr.p_int[3] = ng; state->rstateags.ia.ptr.p_int[4] = nh; state->rstateags.ia.ptr.p_int[5] = i; state->rstateags.ia.ptr.p_int[6] = j; state->rstateags.ia.ptr.p_int[7] = k; state->rstateags.ia.ptr.p_int[8] = radiusdecays; state->rstateags.ia.ptr.p_int[9] = maxsamplesize; state->rstateags.ia.ptr.p_int[10] = cursamplesize; state->rstateags.ia.ptr.p_int[11] = shortstepscnt; state->rstateags.ia.ptr.p_int[12] = backtrackits; state->rstateags.ia.ptr.p_int[13] = maxbacktrackits; state->rstateags.ba.ptr.p_bool[0] = restartneeded; state->rstateags.ba.ptr.p_bool[1] = b; state->rstateags.ba.ptr.p_bool[2] = alphadecreased; state->rstateags.ba.ptr.p_bool[3] = fullsample; state->rstateags.ra.ptr.p_double[0] = radius0; state->rstateags.ra.ptr.p_double[1] = radius; state->rstateags.ra.ptr.p_double[2] = alpha; state->rstateags.ra.ptr.p_double[3] = recommendedstep; state->rstateags.ra.ptr.p_double[4] = dnrm; state->rstateags.ra.ptr.p_double[5] = dg; state->rstateags.ra.ptr.p_double[6] = v; state->rstateags.ra.ptr.p_double[7] = vv; state->rstateags.ra.ptr.p_double[8] = v0; state->rstateags.ra.ptr.p_double[9] = v1; return result; } /************************************************************************* This function calculates merit function (target function + penalties for violation of non-box constraints), using State.X (unscaled), State.Fi, State.J (unscaled) and State.SampleX (scaled) as inputs. 
Results are loaded: * target function value - to State.SampleF0[SampleIdx] * merit function value - to State.SampleF[SampleIdx] * gradient of merit function - to State.SampleGM[SampleIdx] -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_generatemeritfunction(minnsstate* state, ae_int_t sampleidx, ae_state *_state) { ae_int_t n; ae_int_t i; ae_int_t j; ae_int_t nec; ae_int_t nic; ae_int_t ng; ae_int_t nh; double v; double s; n = state->n; nec = state->nec; nic = state->nic; ng = state->ng; nh = state->nh; /* * Integrity check */ for(i=0; i<=n-1; i++) { ae_assert(!state->hasbndl.ptr.p_bool[i]||ae_fp_greater_eq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i]), "MinNS: integrity error", _state); ae_assert(!state->hasbndu.ptr.p_bool[i]||ae_fp_less_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]), "MinNS: integrity error", _state); } /* * Prepare "raw" function */ state->samplef.ptr.p_double[sampleidx] = state->fi.ptr.p_double[0]; state->samplef0.ptr.p_double[sampleidx] = state->fi.ptr.p_double[0]; for(j=0; j<=n-1; j++) { state->samplegm.ptr.pp_double[sampleidx][j] = state->j.ptr.pp_double[0][j]*state->s.ptr.p_double[j]; } /* * Modify merit function with linear constraints */ for(i=0; i<=nec+nic-1; i++) { v = -state->scaledcleic.ptr.pp_double[i][n]; for(j=0; j<=n-1; j++) { v = v+state->scaledcleic.ptr.pp_double[i][j]*state->samplex.ptr.pp_double[sampleidx][j]; } if( i>=nec&&ae_fp_less(v,(double)(0)) ) { continue; } state->samplef.ptr.p_double[sampleidx] = state->samplef.ptr.p_double[sampleidx]+state->rholinear.ptr.p_double[i]*ae_fabs(v, _state); s = (double)(ae_sign(v, _state)); for(j=0; j<=n-1; j++) { state->samplegm.ptr.pp_double[sampleidx][j] = state->samplegm.ptr.pp_double[sampleidx][j]+state->rholinear.ptr.p_double[i]*s*state->scaledcleic.ptr.pp_double[i][j]; } } /* * Modify merit function with nonlinear constraints */ for(i=1; i<=ng+nh; i++) { v = state->fi.ptr.p_double[i]; if( i<=ng&&ae_fp_eq(v,(double)(0)) ) { continue; } if( i>ng&&ae_fp_less_eq(v,(double)(0)) ) { continue; } state->samplef.ptr.p_double[sampleidx] = state->samplef.ptr.p_double[sampleidx]+state->agsrhononlinear*ae_fabs(v, _state); s = (double)(ae_sign(v, _state)); for(j=0; j<=n-1; j++) { state->samplegm.ptr.pp_double[sampleidx][j] = state->samplegm.ptr.pp_double[sampleidx][j]+state->agsrhononlinear*s*state->j.ptr.pp_double[i][j]*state->s.ptr.p_double[j]; } } } /************************************************************************* This function performs transformation of X from scaled coordinates to unscaled ones, paying special attention to box constraints: * points which were exactly at the boundary before scaling will be mapped to corresponding boundary after scaling * in any case, unscaled box constraints will be satisfied -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_unscalepointbc(minnsstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; for(i=0; i<=state->n-1; i++) { if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],state->scaledbndl.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndl.ptr.p_double[i]; continue; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(x->ptr.p_double[i],state->scaledbndu.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndu.ptr.p_double[i]; continue; } x->ptr.p_double[i] = x->ptr.p_double[i]*state->s.ptr.p_double[i]; if( 
state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(x->ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { x->ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } } /************************************************************************* This function solves QP problem of the form [ ] min [ 0.5*c'*(G*inv(H)*G')*c ] s.t. c[i]>=0, SUM(c[i])=1.0 [ ] where G is stored in SampleG[] array, diagonal H is stored in DiagH[]. DbgNCholesky is incremented every time we perform Cholesky decomposition. -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_solveqp(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, ae_int_t* dbgncholesky, minnsqp* state, ae_state *_state) { ae_int_t i; ae_int_t j; ae_int_t k; double v; double vv; ae_int_t n; ae_int_t idx0; ae_int_t idx1; ae_int_t ncandbnd; ae_int_t innerits; ae_int_t outerits; double dnrm; double stp; double stpmax; ae_int_t actidx; double dtol; ae_bool kickneeded; double kicklength; double lambdav; double maxdiag; ae_bool wasactivation; ae_bool werechanges; ae_int_t termcnt; n = nsample; /* * Allocate arrays, prepare data */ rvectorsetlengthatleast(coeffs, n, _state); rvectorsetlengthatleast(&state->xc, n, _state); rvectorsetlengthatleast(&state->xn, n, _state); rvectorsetlengthatleast(&state->x0, n, _state); rvectorsetlengthatleast(&state->gc, n, _state); rvectorsetlengthatleast(&state->d, n, _state); rmatrixsetlengthatleast(&state->uh, n, n, _state); rmatrixsetlengthatleast(&state->ch, n, n, _state); rmatrixsetlengthatleast(&state->rk, nsample, nvars, _state); rvectorsetlengthatleast(&state->invutc, n, _state); rvectorsetlengthatleast(&state->tmp0, n, _state); bvectorsetlengthatleast(&state->tmpb, n, _state); for(i=0; i<=n-1; i++) { state->xc.ptr.p_double[i] = 1.0/n; coeffs->ptr.p_double[i] = 1.0/n; } for(i=0; i<=nsample-1; i++) { for(j=0; j<=nvars-1; j++) { state->rk.ptr.pp_double[i][j] = sampleg->ptr.pp_double[i][j]/ae_sqrt(diagh->ptr.p_double[j], _state); } } rmatrixsyrk(nsample, nvars, 1.0, &state->rk, 0, 0, 0, 0.0, &state->uh, 0, 0, ae_true, _state); maxdiag = 0.0; for(i=0; i<=nsample-1; i++) { maxdiag = ae_maxreal(maxdiag, state->uh.ptr.pp_double[i][i], _state); } maxdiag = coalesce(maxdiag, 1.0, _state); /* * Main cycle: */ innerits = 0; outerits = 0; dtol = 1.0E5*ae_machineepsilon; kicklength = ae_machineepsilon; lambdav = 1.0E5*ae_machineepsilon; termcnt = 0; for(;;) { /* * Save current point to X0 */ ae_v_move(&state->x0.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Calculate gradient at initial point, solve NNLS problem * to determine descent direction D subject to constraints. * * In order to do so we solve following constrained * minimization problem: * ( )^2 * min ( SUM(lambda[i]*A[i]) + G ) * ( ) * Here: * * G is a gradient (column vector) * * A[i] is a column vector of I-th constraint * * lambda[i] is a Lagrange multiplier corresponding to I-th constraint * * NOTE: all A[i] except for last one have only one element being set, * so we rely on sparse capabilities of NNLS solver. However, * in order to use these capabilities we have to reorder variables * in such way that sparse ones come first. 
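* * NOTE: the multiplier of the dense sum-to-one column (index NCandBnd) is later subtracted from every component of D, i.e. it effectively acts as a sign-free equality multiplier, while the first NCandBnd multipliers stay non-negative and correspond to candidate bound constraints.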
* * After finding lambda[] coefficients, we can find constrained descent * direction by subtracting lambda[i]*A[i] from D=-G. We make use of the * fact that first NCandBnd columns are just columns of identity matrix, * so we can perform exact projection by explicitly setting elements of D * to zeros. */ minns_qpcalculategradfunc(sampleg, diagh, nsample, nvars, &state->xc, &state->gc, &state->fc, &state->tmp0, _state); ivectorsetlengthatleast(&state->tmpidx, n, _state); rvectorsetlengthatleast(&state->tmpd, n, _state); rmatrixsetlengthatleast(&state->tmpc2, n, 1, _state); idx0 = 0; ncandbnd = 0; for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xc.ptr.p_double[i],0.0) ) { ncandbnd = ncandbnd+1; } } idx1 = ncandbnd; for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xc.ptr.p_double[i],0.0) ) { /* * Candidate for activation of boundary constraint, * comes first. * * NOTE: multiplication by -1 is due to the fact that * it is lower bound, and has specific direction * of constraint gradient. */ state->tmpidx.ptr.p_int[idx0] = i; state->tmpd.ptr.p_double[idx0] = (-state->gc.ptr.p_double[i])*(-1); state->tmpc2.ptr.pp_double[idx0][0] = 1.0*(-1); idx0 = idx0+1; } else { /* * We are far away from boundary. */ state->tmpidx.ptr.p_int[idx1] = i; state->tmpd.ptr.p_double[idx1] = -state->gc.ptr.p_double[i]; state->tmpc2.ptr.pp_double[idx1][0] = 1.0; idx1 = idx1+1; } } ae_assert(idx0==ncandbnd, "MinNSQP: integrity check failed", _state); ae_assert(idx1==n, "MinNSQP: integrity check failed", _state); snnlsinit(n, 1, n, &state->nnls, _state); snnlssetproblem(&state->nnls, &state->tmpc2, &state->tmpd, ncandbnd, 1, n, _state); snnlsdropnnc(&state->nnls, ncandbnd, _state); snnlssolve(&state->nnls, &state->tmplambdas, _state); for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = -state->gc.ptr.p_double[i]-state->tmplambdas.ptr.p_double[ncandbnd]; } for(i=0; i<=ncandbnd-1; i++) { if( ae_fp_greater(state->tmplambdas.ptr.p_double[i],(double)(0)) ) { state->d.ptr.p_double[state->tmpidx.ptr.p_int[i]] = 0.0; } } /* * Additional stage to "polish" D (improve situation * with sum-to-one constraint and boundary constraints) * and to perform additional integrity check. * * After this stage we are pretty sure that: * * if x[i]=0.0, then d[i]>=0.0 * * if d[i]<0.0, then x[i]>0.0 */ v = 0.0; vv = 0.0; for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xc.ptr.p_double[i],0.0)&&ae_fp_less(state->d.ptr.p_double[i],0.0) ) { state->d.ptr.p_double[i] = 0.0; } v = v+state->d.ptr.p_double[i]; vv = ae_maxreal(vv, ae_fabs(state->gc.ptr.p_double[i], _state), _state); } ae_assert(ae_fp_less(ae_fabs(v, _state),1.0E5*ae_sqrt((double)(n), _state)*ae_machineepsilon*ae_maxreal(vv, 1.0, _state)), "MinNSQP: integrity check failed", _state); /* * Decide whether we need "kick" stage: special stage * that moves us away from boundary constraints which are * not strictly active (i.e. such constraints that x[i]=0.0 and d[i]>0). * * If we need kick stage, we make a kick - and restart iteration. * If not, after this block we can rely on the fact that * for all x[i]=0.0 we have d[i]=0.0 */ kickneeded = ae_false; for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xc.ptr.p_double[i],0.0)&&ae_fp_greater(state->d.ptr.p_double[i],0.0) ) { kickneeded = ae_true; } } if( kickneeded ) { /* * Perform kick. * Restart. * Do not increase outer iterations counter. 
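* * The kick adds KickLength to every component which sits exactly at zero but has D[i]>0 (constraint not strictly active), then renormalizes XC so that SUM(XC[i])=1 holds again.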
*/ v = 0.0; for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xc.ptr.p_double[i],0.0)&&ae_fp_greater(state->d.ptr.p_double[i],0.0) ) { state->xc.ptr.p_double[i] = state->xc.ptr.p_double[i]+kicklength; } v = v+state->xc.ptr.p_double[i]; } ae_assert(ae_fp_greater(v,0.0), "MinNSQP: integrity check failed", _state); for(i=0; i<=n-1; i++) { state->xc.ptr.p_double[i] = state->xc.ptr.p_double[i]/v; } inc(&innerits, _state); continue; } /* * Calculate Cholesky decomposition of constrained Hessian * for Newton phase. */ for(;;) { for(i=0; i<=n-1; i++) { /* * Diagonal element */ if( ae_fp_greater(state->xc.ptr.p_double[i],0.0) ) { state->ch.ptr.pp_double[i][i] = state->uh.ptr.pp_double[i][i]+lambdav*maxdiag; } else { state->ch.ptr.pp_double[i][i] = 1.0; } /* * Offdiagonal elements */ for(j=i+1; j<=n-1; j++) { if( ae_fp_greater(state->xc.ptr.p_double[i],0.0)&&ae_fp_greater(state->xc.ptr.p_double[j],0.0) ) { state->ch.ptr.pp_double[i][j] = state->uh.ptr.pp_double[i][j]; } else { state->ch.ptr.pp_double[i][j] = 0.0; } } } inc(dbgncholesky, _state); if( !spdmatrixcholeskyrec(&state->ch, 0, n, ae_true, &state->tmp0, _state) ) { /* * Cholesky decomposition failed. * Increase LambdaV and repeat iteration. * Do not increase outer iterations counter. */ lambdav = lambdav*10; continue; } break; } /* * Newton phase */ for(;;) { /* * Calculate constrained (equality and sum-to-one) descent direction D. * * Here we use Sherman-Morrison update to calculate direction subject to * sum-to-one constraint. */ minns_qpcalculategradfunc(sampleg, diagh, nsample, nvars, &state->xc, &state->gc, &state->fc, &state->tmp0, _state); for(i=0; i<=n-1; i++) { if( ae_fp_greater(state->xc.ptr.p_double[i],0.0) ) { state->invutc.ptr.p_double[i] = 1.0; state->d.ptr.p_double[i] = -state->gc.ptr.p_double[i]; } else { state->invutc.ptr.p_double[i] = 0.0; state->d.ptr.p_double[i] = 0.0; } } minns_qpsolveut(&state->ch, n, &state->invutc, _state); minns_qpsolveut(&state->ch, n, &state->d, _state); v = 0.0; vv = 0.0; for(i=0; i<=n-1; i++) { vv = vv+ae_sqr(state->invutc.ptr.p_double[i], _state); v = v+state->invutc.ptr.p_double[i]*state->d.ptr.p_double[i]; } for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = state->d.ptr.p_double[i]-v/vv*state->invutc.ptr.p_double[i]; } minns_qpsolveu(&state->ch, n, &state->d, _state); v = 0.0; k = 0; for(i=0; i<=n-1; i++) { v = v+state->d.ptr.p_double[i]; if( ae_fp_neq(state->d.ptr.p_double[i],0.0) ) { k = k+1; } } if( k>0&&ae_fp_greater(v,0.0) ) { vv = v/k; for(i=0; i<=n-1; i++) { if( ae_fp_neq(state->d.ptr.p_double[i],0.0) ) { state->d.ptr.p_double[i] = state->d.ptr.p_double[i]-vv; } } } /* * Calculate length of D, maximum step and component which is * activated by this step. * * Break if D is exactly zero. We do not break here if DNrm is * small - this check is performed later. It is important to * perform last step with nearly-zero D, it allows us to have * extra-precision in solution which is often needed for convergence * of AGS algorithm. */ dnrm = 0.0; for(i=0; i<=n-1; i++) { dnrm = dnrm+ae_sqr(state->d.ptr.p_double[i], _state); } dnrm = ae_sqrt(dnrm, _state); actidx = -1; stpmax = 1.0E50; for(i=0; i<=n-1; i++) { if( ae_fp_less(state->d.ptr.p_double[i],0.0) ) { v = stpmax; stpmax = safeminposrv(state->xc.ptr.p_double[i], -state->d.ptr.p_double[i], stpmax, _state); if( ae_fp_less(stpmax,v) ) { actidx = i; } } } if( ae_fp_eq(dnrm,0.0) ) { break; } /* * Calculate trial function value at unconstrained full step. * If trial value is greater or equal to FC, terminate iterations. 
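* (FC is the value at the current point XC computed above; if the full step fails to decrease it, the Newton phase is terminated and control falls through to the outer-loop checks.)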
*/ for(i=0; i<=n-1; i++) { state->xn.ptr.p_double[i] = state->xc.ptr.p_double[i]+1.0*state->d.ptr.p_double[i]; } minns_qpcalculatefunc(sampleg, diagh, nsample, nvars, &state->xn, &state->fn, &state->tmp0, _state); if( ae_fp_greater_eq(state->fn,state->fc) ) { break; } /* * Perform step * Update Hessian * Update XC * * Break if: * a) no constraint was activated * b) norm of D is small enough */ stp = ae_minreal(1.0, stpmax, _state); for(i=0; i<=n-1; i++) { state->xn.ptr.p_double[i] = ae_maxreal(state->xc.ptr.p_double[i]+stp*state->d.ptr.p_double[i], 0.0, _state); } if( ae_fp_eq(stp,stpmax)&&actidx>=0 ) { state->xn.ptr.p_double[actidx] = 0.0; } wasactivation = ae_false; for(i=0; i<=n-1; i++) { state->tmpb.ptr.p_bool[i] = ae_fp_eq(state->xn.ptr.p_double[i],0.0)&&ae_fp_neq(state->xc.ptr.p_double[i],0.0); wasactivation = wasactivation||state->tmpb.ptr.p_bool[i]; } ae_v_move(&state->xc.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !wasactivation ) { break; } if( ae_fp_less_eq(dnrm,dtol) ) { break; } spdmatrixcholeskyupdatefixbuf(&state->ch, n, ae_true, &state->tmpb, &state->tmp0, _state); } /* * Compare status of boundary constraints - if nothing changed during * last outer iteration, TermCnt is increased. Otherwise it is reset * to zero. * * When TermCnt is large enough, we terminate algorithm. */ werechanges = ae_false; for(i=0; i<=n-1; i++) { werechanges = werechanges||ae_sign(state->x0.ptr.p_double[i], _state)!=ae_sign(state->xc.ptr.p_double[i], _state); } if( !werechanges ) { inc(&termcnt, _state); } else { termcnt = 0; } if( termcnt>=2 ) { break; } /* * Increase number of outer iterations. * Break if we performed too many. */ inc(&outerits, _state); if( outerits==10 ) { break; } } /* * Store result */ for(i=0; i<=n-1; i++) { coeffs->ptr.p_double[i] = state->xc.ptr.p_double[i]; } } /************************************************************************* Function/gradient calculation for QP solver. -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_qpcalculategradfunc(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, /* Real */ ae_vector* g, double* f, /* Real */ ae_vector* tmp, ae_state *_state) { ae_int_t i; ae_int_t j; double v; *f = 0; rvectorsetlengthatleast(g, nsample, _state); rvectorsetlengthatleast(tmp, nvars, _state); /* * Calculate GS*p */ for(j=0; j<=nvars-1; j++) { tmp->ptr.p_double[j] = 0.0; } for(i=0; i<=nsample-1; i++) { v = coeffs->ptr.p_double[i]; ae_v_addd(&tmp->ptr.p_double[0], 1, &sampleg->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1), v); } /* * Calculate F */ *f = 0.0; for(i=0; i<=nvars-1; i++) { *f = *f+0.5*ae_sqr(tmp->ptr.p_double[i], _state)/diagh->ptr.p_double[i]; } /* * Multiply by inverse Hessian */ for(i=0; i<=nvars-1; i++) { tmp->ptr.p_double[i] = tmp->ptr.p_double[i]/diagh->ptr.p_double[i]; } /* * Function gradient */ for(i=0; i<=nsample-1; i++) { v = ae_v_dotproduct(&sampleg->ptr.pp_double[i][0], 1, &tmp->ptr.p_double[0], 1, ae_v_len(0,nvars-1)); g->ptr.p_double[i] = v; } } /************************************************************************* Function calculation for QP solver. 
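Given coefficients Coeffs, sample gradients SampleG and diagonal DiagH, it returns F = 0.5*c'*(G*inv(H)*G')*c, evaluated as 0.5*SUM((G'*c)[j]^2/DiagH[j]); unlike the gradient version above, no gradient is produced.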
-- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_qpcalculatefunc(/* Real */ ae_matrix* sampleg, /* Real */ ae_vector* diagh, ae_int_t nsample, ae_int_t nvars, /* Real */ ae_vector* coeffs, double* f, /* Real */ ae_vector* tmp, ae_state *_state) { ae_int_t i; ae_int_t j; double v; *f = 0; rvectorsetlengthatleast(tmp, nvars, _state); /* * Calculate GS*p */ for(j=0; j<=nvars-1; j++) { tmp->ptr.p_double[j] = 0.0; } for(i=0; i<=nsample-1; i++) { v = coeffs->ptr.p_double[i]; ae_v_addd(&tmp->ptr.p_double[0], 1, &sampleg->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1), v); } /* * Calculate F */ *f = 0.0; for(i=0; i<=nvars-1; i++) { *f = *f+0.5*ae_sqr(tmp->ptr.p_double[i], _state)/diagh->ptr.p_double[i]; } } /************************************************************************* Triangular solver for QP solver. -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_qpsolveu(/* Real */ ae_matrix* a, ae_int_t n, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; ae_int_t j; double v; /* * A^(-1)*X */ for(i=n-1; i>=0; i--) { v = x->ptr.p_double[i]; for(j=i+1; j<=n-1; j++) { v = v-a->ptr.pp_double[i][j]*x->ptr.p_double[j]; } x->ptr.p_double[i] = v/a->ptr.pp_double[i][i]; } } /************************************************************************* Triangular solver for QP solver. -- ALGLIB -- Copyright 02.06.2015 by Bochkanov Sergey *************************************************************************/ static void minns_qpsolveut(/* Real */ ae_matrix* a, ae_int_t n, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; ae_int_t j; double v; /* * A^(-T)*X */ for(i=0; i<=n-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]/a->ptr.pp_double[i][i]; v = x->ptr.p_double[i]; for(j=i+1; j<=n-1; j++) { x->ptr.p_double[j] = x->ptr.p_double[j]-a->ptr.pp_double[i][j]*v; } } } void _minnsqp_init(void* _p, ae_state *_state, ae_bool make_automatic) { minnsqp *p = (minnsqp*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->uh, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->ch, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->rk, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invutc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpidx, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->tmpd, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmplambdas, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmpc2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmpb, 0, DT_BOOL, _state, make_automatic); _snnlssolver_init(&p->nnls, _state, make_automatic); } void _minnsqp_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minnsqp *dst = (minnsqp*)_dst; minnsqp *src = (minnsqp*)_src; dst->fc = src->fc; dst->fn = src->fn; ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->x0, &src->x0, _state, make_automatic); 
ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_matrix_init_copy(&dst->uh, &src->uh, _state, make_automatic); ae_matrix_init_copy(&dst->ch, &src->ch, _state, make_automatic); ae_matrix_init_copy(&dst->rk, &src->rk, _state, make_automatic); ae_vector_init_copy(&dst->invutc, &src->invutc, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmpidx, &src->tmpidx, _state, make_automatic); ae_vector_init_copy(&dst->tmpd, &src->tmpd, _state, make_automatic); ae_vector_init_copy(&dst->tmpc, &src->tmpc, _state, make_automatic); ae_vector_init_copy(&dst->tmplambdas, &src->tmplambdas, _state, make_automatic); ae_matrix_init_copy(&dst->tmpc2, &src->tmpc2, _state, make_automatic); ae_vector_init_copy(&dst->tmpb, &src->tmpb, _state, make_automatic); _snnlssolver_init_copy(&dst->nnls, &src->nnls, _state, make_automatic); } void _minnsqp_clear(void* _p) { minnsqp *p = (minnsqp*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->xc); ae_vector_clear(&p->xn); ae_vector_clear(&p->x0); ae_vector_clear(&p->gc); ae_vector_clear(&p->d); ae_matrix_clear(&p->uh); ae_matrix_clear(&p->ch); ae_matrix_clear(&p->rk); ae_vector_clear(&p->invutc); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmpidx); ae_vector_clear(&p->tmpd); ae_vector_clear(&p->tmpc); ae_vector_clear(&p->tmplambdas); ae_matrix_clear(&p->tmpc2); ae_vector_clear(&p->tmpb); _snnlssolver_clear(&p->nnls); } void _minnsqp_destroy(void* _p) { minnsqp *p = (minnsqp*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->x0); ae_vector_destroy(&p->gc); ae_vector_destroy(&p->d); ae_matrix_destroy(&p->uh); ae_matrix_destroy(&p->ch); ae_matrix_destroy(&p->rk); ae_vector_destroy(&p->invutc); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmpidx); ae_vector_destroy(&p->tmpd); ae_vector_destroy(&p->tmpc); ae_vector_destroy(&p->tmplambdas); ae_matrix_destroy(&p->tmpc2); ae_vector_destroy(&p->tmpb); _snnlssolver_destroy(&p->nnls); } void _minnsstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minnsstate *p = (minnsstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic); ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic); ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); _rcommstate_init(&p->rstateags, _state, make_automatic); _hqrndstate_init(&p->agsrs, _state, make_automatic); ae_vector_init(&p->xstart, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->grs, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->colmax, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->signmin, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->signmax, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->scaledbndl, 0, DT_REAL, 
_state, make_automatic); ae_vector_init(&p->scaledbndu, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->scaledcleic, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->rholinear, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->samplex, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->samplegm, 0, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->samplegmbc, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->samplef, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->samplef0, 0, DT_REAL, _state, make_automatic); _minnsqp_init(&p->nsqp, _state, make_automatic); ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->tmp2, 0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->tmp3, 0, DT_INT, _state, make_automatic); ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fp, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->fm, 0, DT_REAL, _state, make_automatic); } void _minnsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minnsstate *dst = (minnsstate*)_dst; minnsstate *src = (minnsstate*)_src; dst->solvertype = src->solvertype; dst->n = src->n; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->xrep = src->xrep; dst->diffstep = src->diffstep; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic); ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic); dst->nec = src->nec; dst->nic = src->nic; ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic); dst->ng = src->ng; dst->nh = src->nh; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic); ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic); dst->needfij = src->needfij; dst->needfi = src->needfi; dst->xupdated = src->xupdated; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); _rcommstate_init_copy(&dst->rstateags, &src->rstateags, _state, make_automatic); _hqrndstate_init_copy(&dst->agsrs, &src->agsrs, _state, make_automatic); dst->agsradius = src->agsradius; dst->agssamplesize = src->agssamplesize; dst->agsraddecay = src->agsraddecay; dst->agsalphadecay = src->agsalphadecay; dst->agsdecrease = src->agsdecrease; dst->agsinitstp = src->agsinitstp; dst->agsstattold = src->agsstattold; dst->agsshortstpabs = src->agsshortstpabs; dst->agsshortstprel = src->agsshortstprel; dst->agsshortf = src->agsshortf; dst->agsshortlimit = src->agsshortlimit; dst->agsrhononlinear = src->agsrhononlinear; dst->agsminupdate = src->agsminupdate; dst->agsmaxraddecays = src->agsmaxraddecays; dst->agsmaxbacktrack = src->agsmaxbacktrack; dst->agsmaxbacktracknonfull = src->agsmaxbacktracknonfull; dst->agspenaltylevel = src->agspenaltylevel; dst->agspenaltyincrease = src->agspenaltyincrease; ae_vector_init_copy(&dst->xstart, &src->xstart, _state, make_automatic); ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->grs, &src->grs, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); ae_vector_init_copy(&dst->colmax, &src->colmax, _state, 
make_automatic); ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic); ae_vector_init_copy(&dst->signmin, &src->signmin, _state, make_automatic); ae_vector_init_copy(&dst->signmax, &src->signmax, _state, make_automatic); dst->userterminationneeded = src->userterminationneeded; ae_vector_init_copy(&dst->scaledbndl, &src->scaledbndl, _state, make_automatic); ae_vector_init_copy(&dst->scaledbndu, &src->scaledbndu, _state, make_automatic); ae_matrix_init_copy(&dst->scaledcleic, &src->scaledcleic, _state, make_automatic); ae_vector_init_copy(&dst->rholinear, &src->rholinear, _state, make_automatic); ae_matrix_init_copy(&dst->samplex, &src->samplex, _state, make_automatic); ae_matrix_init_copy(&dst->samplegm, &src->samplegm, _state, make_automatic); ae_matrix_init_copy(&dst->samplegmbc, &src->samplegmbc, _state, make_automatic); ae_vector_init_copy(&dst->samplef, &src->samplef, _state, make_automatic); ae_vector_init_copy(&dst->samplef0, &src->samplef0, _state, make_automatic); _minnsqp_init_copy(&dst->nsqp, &src->nsqp, _state, make_automatic); ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic); ae_matrix_init_copy(&dst->tmp2, &src->tmp2, _state, make_automatic); ae_vector_init_copy(&dst->tmp3, &src->tmp3, _state, make_automatic); ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); ae_vector_init_copy(&dst->fp, &src->fp, _state, make_automatic); ae_vector_init_copy(&dst->fm, &src->fm, _state, make_automatic); dst->repinneriterationscount = src->repinneriterationscount; dst->repouteriterationscount = src->repouteriterationscount; dst->repnfev = src->repnfev; dst->repvaridx = src->repvaridx; dst->repfuncidx = src->repfuncidx; dst->repterminationtype = src->repterminationtype; dst->replcerr = src->replcerr; dst->repnlcerr = src->repnlcerr; dst->dbgncholesky = src->dbgncholesky; } void _minnsstate_clear(void* _p) { minnsstate *p = (minnsstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->s); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->hasbndl); ae_vector_clear(&p->hasbndu); ae_matrix_clear(&p->cleic); ae_vector_clear(&p->x); ae_vector_clear(&p->fi); ae_matrix_clear(&p->j); _rcommstate_clear(&p->rstate); _rcommstate_clear(&p->rstateags); _hqrndstate_clear(&p->agsrs); ae_vector_clear(&p->xstart); ae_vector_clear(&p->xc); ae_vector_clear(&p->xn); ae_vector_clear(&p->grs); ae_vector_clear(&p->d); ae_vector_clear(&p->colmax); ae_vector_clear(&p->diagh); ae_vector_clear(&p->signmin); ae_vector_clear(&p->signmax); ae_vector_clear(&p->scaledbndl); ae_vector_clear(&p->scaledbndu); ae_matrix_clear(&p->scaledcleic); ae_vector_clear(&p->rholinear); ae_matrix_clear(&p->samplex); ae_matrix_clear(&p->samplegm); ae_matrix_clear(&p->samplegmbc); ae_vector_clear(&p->samplef); ae_vector_clear(&p->samplef0); _minnsqp_clear(&p->nsqp); ae_vector_clear(&p->tmp0); ae_vector_clear(&p->tmp1); ae_matrix_clear(&p->tmp2); ae_vector_clear(&p->tmp3); ae_vector_clear(&p->xbase); ae_vector_clear(&p->fp); ae_vector_clear(&p->fm); } void _minnsstate_destroy(void* _p) { minnsstate *p = (minnsstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->s); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->hasbndl); ae_vector_destroy(&p->hasbndu); ae_matrix_destroy(&p->cleic); ae_vector_destroy(&p->x); ae_vector_destroy(&p->fi); ae_matrix_destroy(&p->j); _rcommstate_destroy(&p->rstate); _rcommstate_destroy(&p->rstateags); _hqrndstate_destroy(&p->agsrs); 
ae_vector_destroy(&p->xstart); ae_vector_destroy(&p->xc); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->grs); ae_vector_destroy(&p->d); ae_vector_destroy(&p->colmax); ae_vector_destroy(&p->diagh); ae_vector_destroy(&p->signmin); ae_vector_destroy(&p->signmax); ae_vector_destroy(&p->scaledbndl); ae_vector_destroy(&p->scaledbndu); ae_matrix_destroy(&p->scaledcleic); ae_vector_destroy(&p->rholinear); ae_matrix_destroy(&p->samplex); ae_matrix_destroy(&p->samplegm); ae_matrix_destroy(&p->samplegmbc); ae_vector_destroy(&p->samplef); ae_vector_destroy(&p->samplef0); _minnsqp_destroy(&p->nsqp); ae_vector_destroy(&p->tmp0); ae_vector_destroy(&p->tmp1); ae_matrix_destroy(&p->tmp2); ae_vector_destroy(&p->tmp3); ae_vector_destroy(&p->xbase); ae_vector_destroy(&p->fp); ae_vector_destroy(&p->fm); } void _minnsreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minnsreport *p = (minnsreport*)_p; ae_touch_ptr((void*)p); } void _minnsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minnsreport *dst = (minnsreport*)_dst; minnsreport *src = (minnsreport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->cerr = src->cerr; dst->lcerr = src->lcerr; dst->nlcerr = src->nlcerr; dst->terminationtype = src->terminationtype; dst->varidx = src->varidx; dst->funcidx = src->funcidx; } void _minnsreport_clear(void* _p) { minnsreport *p = (minnsreport*)_p; ae_touch_ptr((void*)p); } void _minnsreport_destroy(void* _p) { minnsreport *p = (minnsreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Obsolete function, use MinLBFGSSetPrecDefault() instead. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetdefaultpreconditioner(minlbfgsstate* state, ae_state *_state) { minlbfgssetprecdefault(state, _state); } /************************************************************************* Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void minlbfgssetcholeskypreconditioner(minlbfgsstate* state, /* Real */ ae_matrix* p, ae_bool isupper, ae_state *_state) { minlbfgssetpreccholesky(state, p, isupper, _state); } /************************************************************************* This is obsolete function which was used by previous version of the BLEIC optimizer. It does nothing in the current version of BLEIC. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbarrierwidth(minbleicstate* state, double mu, ae_state *_state) { } /************************************************************************* This is obsolete function which was used by previous version of the BLEIC optimizer. It does nothing in the current version of BLEIC. -- ALGLIB -- Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ void minbleicsetbarrierdecay(minbleicstate* state, double mudecay, ae_state *_state) { } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. 
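A minimal sketch of the reverse-communication loop served by this family of functions (C-core API; declarations and allocation of X/BndL/BndU, State and Rep, as well as error handling, are omitted, and the callback bodies are placeholders):

    minasacreate(n, &x, &bndl, &bndu, &state, _state);
    minasasetcond(&state, 0.0, 0.0, 1.0E-6, 0, _state);
    while( minasaiteration(&state, _state) )
    {
        if( state.needfg )
        {
            ...evaluate target at state.x, fill state.f and state.g...
        }
        if( state.xupdated )
        {
            ...optional progress report for state.x...
        }
    }
    minasaresults(&state, &x, &rep, _state);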
-- ALGLIB -- Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ void minasacreate(ae_int_t n, /* Real */ ae_vector* x, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, minasastate* state, ae_state *_state) { ae_int_t i; _minasastate_clear(state); ae_assert(n>=1, "MinASA: N too small!", _state); ae_assert(x->cnt>=n, "MinCGCreate: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinCGCreate: X contains infinite or NaN values!", _state); ae_assert(bndl->cnt>=n, "MinCGCreate: Length(BndL)<N!", _state); ae_assert(isfinitevector(bndl, n, _state), "MinCGCreate: BndL contains infinite or NaN values!", _state); ae_assert(bndu->cnt>=n, "MinCGCreate: Length(BndU)<N!", _state); ae_assert(isfinitevector(bndu, n, _state), "MinCGCreate: BndU contains infinite or NaN values!", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]), "MinASA: inconsistent bounds!", _state); ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],x->ptr.p_double[i]), "MinASA: infeasible X!", _state); ae_assert(ae_fp_less_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]), "MinASA: infeasible X!", _state); } /* * Initialize */ state->n = n; minasasetcond(state, (double)(0), (double)(0), (double)(0), 0, _state); minasasetxrep(state, ae_false, _state); minasasetstpmax(state, (double)(0), _state); minasasetalgorithm(state, -1, _state); ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->ak, n, _state); ae_vector_set_length(&state->xk, n, _state); ae_vector_set_length(&state->dk, n, _state); ae_vector_set_length(&state->an, n, _state); ae_vector_set_length(&state->xn, n, _state); ae_vector_set_length(&state->dn, n, _state); ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->d, n, _state); ae_vector_set_length(&state->g, n, _state); ae_vector_set_length(&state->gc, n, _state); ae_vector_set_length(&state->work, n, _state); ae_vector_set_length(&state->yk, n, _state); minasarestartfrom(state, x, bndl, bndu, _state); } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetcond(minasastate* state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsg, _state), "MinASASetCond: EpsG is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinASASetCond: negative EpsG!", _state); ae_assert(ae_isfinite(epsf, _state), "MinASASetCond: EpsF is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinASASetCond: negative EpsF!", _state); ae_assert(ae_isfinite(epsx, _state), "MinASASetCond: EpsX is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinASASetCond: negative EpsX!", _state); ae_assert(maxits>=0, "MinASASetCond: negative MaxIts!", _state); if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 ) { epsx = 1.0E-6; } state->epsg = epsg; state->epsf = epsf; state->epsx = epsx; state->maxits = maxits; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage.
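AlgoType must be -1, 0 or +1; the value -1 requests an automatic choice, which the code below currently maps to +1.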
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetalgorithm(minasastate* state, ae_int_t algotype, ae_state *_state) { ae_assert(algotype>=-1&&algotype<=1, "MinASASetAlgorithm: incorrect AlgoType!", _state); if( algotype==-1 ) { algotype = 1; } state->cgtype = algotype; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinASASetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinASASetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ ae_bool minasaiteration(minasastate* state, ae_state *_state) { ae_int_t n; ae_int_t i; double betak; double v; double vv; ae_int_t mcinfo; ae_bool b; ae_bool stepfound; ae_int_t diffcnt; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. * * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; i = state->rstate.ia.ptr.p_int[1]; mcinfo = state->rstate.ia.ptr.p_int[2]; diffcnt = state->rstate.ia.ptr.p_int[3]; b = state->rstate.ba.ptr.p_bool[0]; stepfound = state->rstate.ba.ptr.p_bool[1]; betak = state->rstate.ra.ptr.p_double[0]; v = state->rstate.ra.ptr.p_double[1]; vv = state->rstate.ra.ptr.p_double[2]; } else { n = 359; i = -58; mcinfo = -919; diffcnt = -909; b = ae_true; stepfound = ae_true; betak = 74; v = -788; vv = 809; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } /* * Routine body */ /* * Prepare */ n = state->n; state->repterminationtype = 0; state->repiterationscount = 0; state->repnfev = 0; state->debugrestartscount = 0; state->cgtype = 1; ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xk.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xk.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->ak.ptr.p_double[i] = (double)(0); } else { state->ak.ptr.p_double[i] = (double)(1); } } state->mu = 0.1; state->curalgo = 0; /* * Calculate F/G, initialize algorithm */ mincomp_clearrequestfields(state, _state); state->needfg = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfg = 
ae_false; if( !state->xrep ) { goto lbl_15; } /* * progress report */ mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->xupdated = ae_false; lbl_15: if( ae_fp_less_eq(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) { state->repterminationtype = 4; result = ae_false; return result; } state->repnfev = state->repnfev+1; /* * Main cycle * * At the beginning of new iteration: * * CurAlgo stores current algorithm selector * * State.XK, State.F and State.G store current X/F/G * * State.AK stores current set of active constraints */ lbl_17: if( ae_false ) { goto lbl_18; } /* * GPA algorithm */ if( state->curalgo!=0 ) { goto lbl_19; } state->k = 0; state->acount = 0; lbl_21: if( ae_false ) { goto lbl_22; } /* * Determine Dk = proj(xk - gk)-xk */ for(i=0; i<=n-1; i++) { state->d.ptr.p_double[i] = boundval(state->xk.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state)-state->xk.ptr.p_double[i]; } /* * Armijo line search. * * exact search with alpha=1 is tried first, * 'exact' means that we evaluate f() EXACTLY at * bound(x-g,bndl,bndu), without intermediate floating * point operations. * * alpha<1 are tried if explicit search wasn't successful * Result is placed into XN. * * Two types of search are needed because we can't * just use second type with alpha=1 because in finite * precision arithmetics (x1-x0)+x0 may differ from x1. * So while x1 is correctly bounded (it lie EXACTLY on * boundary, if it is active), (x1-x0)+x0 may be * not bounded. */ v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->dginit = v; state->finit = state->f; if( !(ae_fp_less_eq(mincomp_asad1norm(state, _state),state->stpmax)||ae_fp_eq(state->stpmax,(double)(0))) ) { goto lbl_23; } /* * Try alpha=1 step first */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = boundval(state->xk.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); } mincomp_clearrequestfields(state, _state); state->needfg = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->needfg = ae_false; state->repnfev = state->repnfev+1; stepfound = ae_fp_less_eq(state->f,state->finit+mincomp_gpaftol*state->dginit); goto lbl_24; lbl_23: stepfound = ae_false; lbl_24: if( !stepfound ) { goto lbl_25; } /* * we are at the boundary(ies) */ ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->stp = (double)(1); goto lbl_26; lbl_25: /* * alpha=1 is too large, try smaller values */ state->stp = (double)(1); linminnormalized(&state->d, &state->stp, n, _state); state->dginit = state->dginit/state->stp; state->stp = mincomp_gpadecay*state->stp; if( ae_fp_greater(state->stpmax,(double)(0)) ) { state->stp = ae_minreal(state->stp, state->stpmax, _state); } lbl_27: if( ae_false ) { goto lbl_28; } v = state->stp; ae_v_move(&state->x.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_addd(&state->x.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), v); mincomp_clearrequestfields(state, _state); state->needfg = ae_true; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->needfg = ae_false; state->repnfev = state->repnfev+1; if( ae_fp_less_eq(state->stp,mincomp_stpmin) ) { goto lbl_28; } if( ae_fp_less_eq(state->f,state->finit+state->stp*mincomp_gpaftol*state->dginit) ) { goto lbl_28; } state->stp = state->stp*mincomp_gpadecay; goto lbl_27; 
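/* * The backtracking loop above exits either because Stp reached StpMin (no acceptable step found) or because the Armijo condition F<=FInit+Stp*GPAFTol*DGInit was met; in both cases XN below receives the last trial point. */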
lbl_28: ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); lbl_26: state->repiterationscount = state->repiterationscount+1; if( !state->xrep ) { goto lbl_29; } /* * progress report */ mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->xupdated = ae_false; lbl_29: /* * Calculate new set of active constraints. * Reset counter if active set was changed. * Prepare for the new iteration */ for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->an.ptr.p_double[i] = (double)(0); } else { state->an.ptr.p_double[i] = (double)(1); } } for(i=0; i<=n-1; i++) { if( ae_fp_neq(state->ak.ptr.p_double[i],state->an.ptr.p_double[i]) ) { state->acount = -1; break; } } state->acount = state->acount+1; ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->ak.ptr.p_double[0], 1, &state->an.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Stopping conditions */ if( !(state->repiterationscount>=state->maxits&&state->maxits>0) ) { goto lbl_31; } /* * Too many iterations */ state->repterminationtype = 5; if( !state->xrep ) { goto lbl_33; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 5; goto lbl_rcomm; lbl_5: state->xupdated = ae_false; lbl_33: result = ae_false; return result; lbl_31: if( ae_fp_greater(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) { goto lbl_35; } /* * Gradient is small enough */ state->repterminationtype = 4; if( !state->xrep ) { goto lbl_37; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->xupdated = ae_false; lbl_37: result = ae_false; return result; lbl_35: v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_fp_greater(ae_sqrt(v, _state)*state->stp,state->epsx) ) { goto lbl_39; } /* * Step size is too small, no further improvement is * possible */ state->repterminationtype = 2; if( !state->xrep ) { goto lbl_41; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->xupdated = ae_false; lbl_41: result = ae_false; return result; lbl_39: if( ae_fp_greater(state->finit-state->f,state->epsf*ae_maxreal(ae_fabs(state->finit, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) { goto lbl_43; } /* * F(k+1)-F(k) is small enough */ state->repterminationtype = 1; if( !state->xrep ) { goto lbl_45; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->xupdated = ae_false; lbl_45: result = ae_false; return result; lbl_43: /* * Decide - should we switch algorithm or not */ if( mincomp_asauisempty(state, _state) ) { if( ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) { state->curalgo = 1; goto lbl_22; } else { state->mu = state->mu*mincomp_asarho; } } else { if( state->acount==mincomp_n1 ) { if( ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) { state->curalgo = 1; goto lbl_22; } } } /* * Next iteration */ state->k = state->k+1; goto lbl_21; lbl_22: lbl_19: /* * CG algorithm */ if( state->curalgo!=1 ) { goto lbl_47; } /* * first, check that there are non-active constraints. 
* move to GPA algorithm, if all constraints are active */ b = ae_true; for(i=0; i<=n-1; i++) { if( ae_fp_neq(state->ak.ptr.p_double[i],(double)(0)) ) { b = ae_false; break; } } if( b ) { state->curalgo = 0; goto lbl_17; } /* * CG iterations */ state->fold = state->f; ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); for(i=0; i<=n-1; i++) { state->dk.ptr.p_double[i] = -state->g.ptr.p_double[i]*state->ak.ptr.p_double[i]; state->gc.ptr.p_double[i] = state->g.ptr.p_double[i]*state->ak.ptr.p_double[i]; } lbl_49: if( ae_false ) { goto lbl_50; } /* * Store G[k] for later calculation of Y[k] */ for(i=0; i<=n-1; i++) { state->yk.ptr.p_double[i] = -state->gc.ptr.p_double[i]; } /* * Make a CG step in direction given by DK[]: * * calculate step. Step projection into feasible set * is used. It has several benefits: a) step may be * found with usual line search, b) multiple constraints * may be activated with one step, c) activated constraints * are detected in a natural way - just compare x[i] with * bounds * * update active set, set B to True, if there * were changes in the set. */ ae_v_move(&state->d.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->mcstage = 0; state->stp = (double)(1); linminnormalized(&state->d, &state->stp, n, _state); if( ae_fp_neq(state->laststep,(double)(0)) ) { state->stp = state->laststep; } mcsrch(n, &state->xn, &state->f, &state->gc, &state->d, &state->stp, state->stpmax, mincomp_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); lbl_51: if( state->mcstage==0 ) { goto lbl_52; } /* * preprocess data: bound State.XN so it belongs to the * feasible set and store it in the State.X */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = boundval(state->xn.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); } /* * RComm */ mincomp_clearrequestfields(state, _state); state->needfg = ae_true; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->needfg = ae_false; /* * postprocess data: zero components of G corresponding to * the active constraints */ for(i=0; i<=n-1; i++) { if( ae_fp_eq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->gc.ptr.p_double[i] = (double)(0); } else { state->gc.ptr.p_double[i] = state->g.ptr.p_double[i]; } } mcsrch(n, &state->xn, &state->f, &state->gc, &state->d, &state->stp, state->stpmax, mincomp_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); goto lbl_51; lbl_52: diffcnt = 0; for(i=0; i<=n-1; i++) { /* * XN contains unprojected result, project it, * save copy to X (will be used for progress reporting) */ state->xn.ptr.p_double[i] = boundval(state->xn.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); /* * update active set */ if( ae_fp_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { state->an.ptr.p_double[i] = (double)(0); } else { state->an.ptr.p_double[i] = (double)(1); } if( ae_fp_neq(state->an.ptr.p_double[i],state->ak.ptr.p_double[i]) ) { diffcnt = diffcnt+1; } state->ak.ptr.p_double[i] = state->an.ptr.p_double[i]; } ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repnfev = state->repnfev+state->nfev; state->repiterationscount = state->repiterationscount+1; 
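/* * At this point XK holds the projected new iterate, AK stores the updated activity flags, and DiffCnt counts how many flags changed during this CG step; DiffCnt is used below when deciding whether to restart CG or switch back to GPA. */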
if( !state->xrep ) { goto lbl_53; } /* * progress report */ mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->xupdated = ae_false; lbl_53: /* * Update info about step length */ v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->laststep = ae_sqrt(v, _state)*state->stp; /* * Check stopping conditions. */ if( ae_fp_greater(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) { goto lbl_55; } /* * Gradient is small enough */ state->repterminationtype = 4; if( !state->xrep ) { goto lbl_57; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->xupdated = ae_false; lbl_57: result = ae_false; return result; lbl_55: if( !(state->repiterationscount>=state->maxits&&state->maxits>0) ) { goto lbl_59; } /* * Too many iterations */ state->repterminationtype = 5; if( !state->xrep ) { goto lbl_61; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->xupdated = ae_false; lbl_61: result = ae_false; return result; lbl_59: if( !(ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state))&&diffcnt==0) ) { goto lbl_63; } /* * These conditions (EpsF/EpsX) are explicitly or implicitly * related to the current step size and influenced * by changes in the active constraints. * * For these reasons they are checked only when we don't * want to 'unstick' at the end of the iteration and there * were no changes in the active set. * * NOTE: condition |G|>=Mu*|D1| must be exactly opposite * to the condition used to switch back to GPA. At least * one inequality must be strict, otherwise infinite cycle * may occur when |G|=Mu*|D1| (we DON'T test stopping * conditions and we DON'T switch to GPA, so we cycle * indefinitely).
*/ if( ae_fp_greater(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) { goto lbl_65; } /* * F(k+1)-F(k) is small enough */ state->repterminationtype = 1; if( !state->xrep ) { goto lbl_67; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->xupdated = ae_false; lbl_67: result = ae_false; return result; lbl_65: if( ae_fp_greater(state->laststep,state->epsx) ) { goto lbl_69; } /* * X(k+1)-X(k) is small enough */ state->repterminationtype = 2; if( !state->xrep ) { goto lbl_71; } mincomp_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->xupdated = ae_false; lbl_71: result = ae_false; return result; lbl_69: lbl_63: /* * Check conditions for switching */ if( ae_fp_less(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) { state->curalgo = 0; goto lbl_50; } if( diffcnt>0 ) { if( mincomp_asauisempty(state, _state)||diffcnt>=mincomp_n2 ) { state->curalgo = 1; } else { state->curalgo = 0; } goto lbl_50; } /* * Calculate D(k+1) * * Line search may result in: * * maximum feasible step being taken (already processed) * * point satisfying Wolfe conditions * * some kind of error (CG is restarted by assigning 0.0 to Beta) */ if( mcinfo==1 ) { /* * Standard Wolfe conditions are satisfied: * * calculate Y[K] and BetaK */ ae_v_add(&state->yk.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); vv = ae_v_dotproduct(&state->yk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = ae_v_dotproduct(&state->gc.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->betady = v/vv; v = ae_v_dotproduct(&state->gc.ptr.p_double[0], 1, &state->yk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->betahs = v/vv; if( state->cgtype==0 ) { betak = state->betady; } if( state->cgtype==1 ) { betak = ae_maxreal((double)(0), ae_minreal(state->betady, state->betahs, _state), _state); } } else { /* * Something is wrong (may be function is too wild or too flat). * * We'll set BetaK=0, which will restart CG algorithm. * We can stop later (during normal checks) if stopping conditions are met. */ betak = (double)(0); state->debugrestartscount = state->debugrestartscount+1; } ae_v_moveneg(&state->dn.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_addd(&state->dn.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1), betak); ae_v_move(&state->dk.ptr.p_double[0], 1, &state->dn.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * update other information */ state->fold = state->f; state->k = state->k+1; goto lbl_49; lbl_50: lbl_47: goto lbl_17; lbl_18: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = i; state->rstate.ia.ptr.p_int[2] = mcinfo; state->rstate.ia.ptr.p_int[3] = diffcnt; state->rstate.ba.ptr.p_bool[0] = b; state->rstate.ba.ptr.p_bool[1] = stepfound; state->rstate.ra.ptr.p_double[0] = betak; state->rstate.ra.ptr.p_double[1] = v; state->rstate.ra.ptr.p_double[2] = vv; return result; } /************************************************************************* Obsolete optimization algorithm. Was replaced by MinBLEIC subpackage. 
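This convenience wrapper clears X and Rep and forwards to MinASAResultsBuf(), which copies the current point and the iteration/termination information out of State.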
  -- ALGLIB --
     Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/
void minasaresults(minasastate* state,
     /* Real */ ae_vector* x,
     minasareport* rep,
     ae_state *_state)
{
    ae_vector_clear(x);
    _minasareport_clear(rep);
    minasaresultsbuf(state, x, rep, _state);
}


/*************************************************************************
Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.

  -- ALGLIB --
     Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/
void minasaresultsbuf(minasastate* state,
     /* Real */ ae_vector* x,
     minasareport* rep,
     ae_state *_state)
{
    ae_int_t i;

    if( x->cnt<state->n )
    {
        ae_vector_set_length(x, state->n, _state);
    }
    ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1));
    rep->iterationscount = state->repiterationscount;
    rep->nfev = state->repnfev;
    rep->terminationtype = state->repterminationtype;
    rep->activeconstraints = 0;
    for(i=0; i<=state->n-1; i++)
    {
        if( ae_fp_eq(state->ak.ptr.p_double[i],(double)(0)) )
        {
            rep->activeconstraints = rep->activeconstraints+1;
        }
    }
}


/*************************************************************************
Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.

  -- ALGLIB --
     Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/
void minasarestartfrom(minasastate* state,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* bndl,
     /* Real */ ae_vector* bndu,
     ae_state *_state)
{
    ae_assert(x->cnt>=state->n, "MinASARestartFrom: Length(X)<N!", _state);
    ae_assert(isfinitevector(x, state->n, _state), "MinASARestartFrom: X contains infinite or NaN values!", _state);
    ae_assert(bndl->cnt>=state->n, "MinASARestartFrom: Length(BndL)<N!", _state);
    ae_assert(isfinitevector(bndl, state->n, _state), "MinASARestartFrom: BndL contains infinite or NaN values!", _state);
    ae_assert(bndu->cnt>=state->n, "MinASARestartFrom: Length(BndU)<N!", _state);
    ae_assert(isfinitevector(bndu, state->n, _state), "MinASARestartFrom: BndU contains infinite or NaN values!", _state);
    ae_v_move(&state->x.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1));
    ae_v_move(&state->bndl.ptr.p_double[0], 1, &bndl->ptr.p_double[0], 1, ae_v_len(0,state->n-1));
    ae_v_move(&state->bndu.ptr.p_double[0], 1, &bndu->ptr.p_double[0], 1, ae_v_len(0,state->n-1));
    state->laststep = (double)(0);
    ae_vector_set_length(&state->rstate.ia, 3+1, _state);
    ae_vector_set_length(&state->rstate.ba, 1+1, _state);
    ae_vector_set_length(&state->rstate.ra, 2+1, _state);
    state->rstate.stage = -1;
    mincomp_clearrequestfields(state, _state);
}


/*************************************************************************
Returns norm of bounded anti-gradient.

Bounded antigradient is a vector obtained from the anti-gradient by
zeroing components which point outwards:
    result = norm(v)
    v[i]=0     if ((-g[i]<0)and(x[i]=bndl[i])) or
                  ((-g[i]>0)and(x[i]=bndu[i]))
    v[i]=-g[i] otherwise

This function may be used to check a stopping criterion.
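For example, if x[i]=bndl[i] and g[i]>0, the antigradient component
-g[i]<0 points outside the feasible box, so it is zeroed and does not
contribute to the norm; components at interior points always contribute
-g[i].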
-- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ static double mincomp_asaboundedantigradnorm(minasastate* state, ae_state *_state) { ae_int_t i; double v; double result; result = (double)(0); for(i=0; i<=state->n-1; i++) { v = -state->g.ptr.p_double[i]; if( ae_fp_eq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])&&ae_fp_less(-state->g.ptr.p_double[i],(double)(0)) ) { v = (double)(0); } if( ae_fp_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i])&&ae_fp_greater(-state->g.ptr.p_double[i],(double)(0)) ) { v = (double)(0); } result = result+ae_sqr(v, _state); } result = ae_sqrt(result, _state); return result; } /************************************************************************* Returns norm of GI(x). GI(x) is a gradient vector whose components associated with active constraints are zeroed. It differs from bounded anti-gradient because components of GI(x) are zeroed independently of sign(g[i]), and anti-gradient's components are zeroed with respect to both constraint and sign. -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ static double mincomp_asaginorm(minasastate* state, ae_state *_state) { ae_int_t i; double result; result = (double)(0); for(i=0; i<=state->n-1; i++) { if( ae_fp_neq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])&&ae_fp_neq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { result = result+ae_sqr(state->g.ptr.p_double[i], _state); } } result = ae_sqrt(result, _state); return result; } /************************************************************************* Returns norm(D1(State.X)) For a meaning of D1 see 'NEW ACTIVE SET ALGORITHM FOR BOX CONSTRAINED OPTIMIZATION' by WILLIAM W. HAGER AND HONGCHAO ZHANG. -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ static double mincomp_asad1norm(minasastate* state, ae_state *_state) { ae_int_t i; double result; result = (double)(0); for(i=0; i<=state->n-1; i++) { result = result+ae_sqr(boundval(state->x.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state)-state->x.ptr.p_double[i], _state); } result = ae_sqrt(result, _state); return result; } /************************************************************************* Returns True, if U set is empty. * State.X is used as point, * State.G - as gradient, * D is calculated within function (because State.D may have different meaning depending on current optimization algorithm) For a meaning of U see 'NEW ACTIVE SET ALGORITHM FOR BOX CONSTRAINED OPTIMIZATION' by WILLIAM W. HAGER AND HONGCHAO ZHANG. 
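In the notation used below, U is treated as non-empty if and only if
there exists an index i such that

    |g[i]| >= sqrt(d)   and   min(x[i]-bndl[i], bndu[i]-x[i]) >= d^(3/2),

where d = norm(D1(State.X)) as returned by ASAD1Norm().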
-- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ static ae_bool mincomp_asauisempty(minasastate* state, ae_state *_state) { ae_int_t i; double d; double d2; double d32; ae_bool result; d = mincomp_asad1norm(state, _state); d2 = ae_sqrt(d, _state); d32 = d*d2; result = ae_true; for(i=0; i<=state->n-1; i++) { if( ae_fp_greater_eq(ae_fabs(state->g.ptr.p_double[i], _state),d2)&&ae_fp_greater_eq(ae_minreal(state->x.ptr.p_double[i]-state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i]-state->x.ptr.p_double[i], _state),d32) ) { result = ae_false; return result; } } return result; } /************************************************************************* Clears request fileds (to be sure that we don't forgot to clear something) *************************************************************************/ static void mincomp_clearrequestfields(minasastate* state, ae_state *_state) { state->needfg = ae_false; state->xupdated = ae_false; } void _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic) { minasastate *p = (minasastate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->ak, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->an, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); _linminstate_init(&p->lstate, _state, make_automatic); } void _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minasastate *dst = (minasastate*)_dst; minasastate *src = (minasastate*)_src; dst->n = src->n; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->xrep = src->xrep; dst->stpmax = src->stpmax; dst->cgtype = src->cgtype; dst->k = src->k; dst->nfev = src->nfev; dst->mcstage = src->mcstage; ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic); ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic); dst->curalgo = src->curalgo; dst->acount = src->acount; dst->mu = src->mu; dst->finit = src->finit; dst->dginit = src->dginit; ae_vector_init_copy(&dst->ak, &src->ak, _state, make_automatic); ae_vector_init_copy(&dst->xk, &src->xk, _state, make_automatic); ae_vector_init_copy(&dst->dk, &src->dk, _state, make_automatic); ae_vector_init_copy(&dst->an, &src->an, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->dn, &src->dn, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->fold = src->fold; dst->stp = src->stp; ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic); ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic); ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic); dst->laststep = src->laststep; 
ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->needfg = src->needfg; dst->xupdated = src->xupdated; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); dst->repiterationscount = src->repiterationscount; dst->repnfev = src->repnfev; dst->repterminationtype = src->repterminationtype; dst->debugrestartscount = src->debugrestartscount; _linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic); dst->betahs = src->betahs; dst->betady = src->betady; } void _minasastate_clear(void* _p) { minasastate *p = (minasastate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->bndl); ae_vector_clear(&p->bndu); ae_vector_clear(&p->ak); ae_vector_clear(&p->xk); ae_vector_clear(&p->dk); ae_vector_clear(&p->an); ae_vector_clear(&p->xn); ae_vector_clear(&p->dn); ae_vector_clear(&p->d); ae_vector_clear(&p->work); ae_vector_clear(&p->yk); ae_vector_clear(&p->gc); ae_vector_clear(&p->x); ae_vector_clear(&p->g); _rcommstate_clear(&p->rstate); _linminstate_clear(&p->lstate); } void _minasastate_destroy(void* _p) { minasastate *p = (minasastate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->bndl); ae_vector_destroy(&p->bndu); ae_vector_destroy(&p->ak); ae_vector_destroy(&p->xk); ae_vector_destroy(&p->dk); ae_vector_destroy(&p->an); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->dn); ae_vector_destroy(&p->d); ae_vector_destroy(&p->work); ae_vector_destroy(&p->yk); ae_vector_destroy(&p->gc); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); _rcommstate_destroy(&p->rstate); _linminstate_destroy(&p->lstate); } void _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic) { minasareport *p = (minasareport*)_p; ae_touch_ptr((void*)p); } void _minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { minasareport *dst = (minasareport*)_dst; minasareport *src = (minasareport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->terminationtype = src->terminationtype; dst->activeconstraints = src->activeconstraints; } void _minasareport_clear(void* _p) { minasareport *p = (minasareport*)_p; ae_touch_ptr((void*)p); } void _minasareport_destroy(void* _p) { minasareport *p = (minasareport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD) /************************************************************************* NONLINEAR CONJUGATE GRADIENT METHOD DESCRIPTION: The subroutine minimizes function F(x) of N arguments by using one of the nonlinear conjugate gradient methods. These CG methods are globally convergent (even on non-convex functions) as long as grad(f) is Lipschitz continuous in a some neighborhood of the L = { x : f(x)<=f(x0) }. REQUIREMENTS: Algorithm will request following information during its operation: * function value F and its gradient G (simultaneously) at given point X USAGE: 1. User initializes algorithm state with MinCGCreate() call 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and other functions 3. User calls MinCGOptimize() function which takes algorithm state and pointer (delegate, etc.) to callback function which calculates F/G. 4. User calls MinCGResults() to get solution 5. Optionally, user may call MinCGRestartFrom() to solve another problem with same N but another starting point and/or another function. MinCGRestartFrom() allows to reuse already initialized structure. 
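A minimal end-to-end sketch of this sequence, assuming the public C++
wrappers declared in optimization.h (the target function, its gradient
and the tolerances below are illustrative only):

    #include "optimization.h"
    using namespace alglib;

    // illustrative target: f(x) = (x0-1)^2 + (x1+2)^2, analytic gradient
    void demo_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        func = (x[0]-1)*(x[0]-1) + (x[1]+2)*(x[1]+2);
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*(x[1]+2);
    }

    int main()
    {
        real_1d_array x = "[0,0]";
        mincgstate state;
        mincgreport rep;
        mincgcreate(x, state);                  // 1. create optimizer state
        mincgsetcond(state, 0, 0, 1.0e-8, 0);   // 2. stopping conditions
        mincgoptimize(state, demo_grad);        // 3. run optimization
        mincgresults(state, x, rep);            // 4. extract solution
        // x is now close to [1,-2]; rep.terminationtype>0 indicates success
        return 0;
    }

MinCGRestartFrom() can then be used to reuse the same state object for
another starting point (step 5 above).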
INPUT PARAMETERS:
    N       -   problem dimension, N>0:
                * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
    X       -   starting point, array[0..N-1].

OUTPUT PARAMETERS:
    State   -   structure which stores algorithm state

  -- ALGLIB --
     Copyright 25.03.2010 by Bochkanov Sergey
*************************************************************************/
void mincgcreate(ae_int_t n,
     /* Real */ ae_vector* x,
     mincgstate* state,
     ae_state *_state)
{
    _mincgstate_clear(state);

    ae_assert(n>=1, "MinCGCreate: N too small!", _state);
    ae_assert(x->cnt>=n, "MinCGCreate: Length(X)<N!", _state);
    ae_assert(isfinitevector(x, n, _state), "MinCGCreate: X contains infinite or NaN values!", _state);
    mincg_mincginitinternal(n, x, 0.0, state, _state);
}


/*************************************************************************
The subroutine is finite difference variant of MinCGCreate(). It uses
finite differences in order to differentiate target function.

Description below contains information which is specific to this function
only. We recommend to read comments on MinCGCreate() in order to get more
information about creation of CG optimizer.

INPUT PARAMETERS:
    N       -   problem dimension, N>0:
                * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
    X       -   starting point, array[0..N-1].
    DiffStep-   differentiation step, >0

OUTPUT PARAMETERS:
    State   -   structure which stores algorithm state

NOTES:
1. algorithm uses 4-point central formula for differentiation.
2. differentiation step along I-th axis is equal to DiffStep*S[I] where
   S[] is scaling vector which can be set by MinCGSetScale() call.
3. we recommend you to use moderate values of differentiation step. Too
   large step will result in too large truncation errors, while too small
   step will result in too large numerical errors. 1.0E-6 can be a good
   value to start with.
4. Numerical differentiation is very inefficient - one gradient
   calculation needs 4*N function evaluations. This function will work
   for any N - either small (1...10), moderate (10...100) or large
   (100...). However, performance penalty will be too severe for any N's
   except for small ones. We should also say that code which relies on
   numerical differentiation is less robust and precise. CG needs exact
   gradient values. Imprecise gradient may slow down convergence,
   especially on highly nonlinear problems. Thus we recommend to use this
   function for fast prototyping on small-dimensional problems only, and
   to implement analytical gradient as soon as possible.

  -- ALGLIB --
     Copyright 16.05.2011 by Bochkanov Sergey
*************************************************************************/
void mincgcreatef(ae_int_t n,
     /* Real */ ae_vector* x,
     double diffstep,
     mincgstate* state,
     ae_state *_state)
{
    _mincgstate_clear(state);

    ae_assert(n>=1, "MinCGCreateF: N too small!", _state);
    ae_assert(x->cnt>=n, "MinCGCreateF: Length(X)<N!", _state);
    ae_assert(isfinitevector(x, n, _state), "MinCGCreateF: X contains infinite or NaN values!", _state);
    ae_assert(ae_isfinite(diffstep, _state), "MinCGCreateF: DiffStep is infinite or NaN!", _state);
    ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinCGCreateF: DiffStep is non-positive!", _state);
    mincg_mincginitinternal(n, x, diffstep, state, _state);
}


/*************************************************************************
This function sets stopping conditions for CG optimization algorithm.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    EpsG    -   >=0
                The subroutine finishes its work if the condition
                |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
                * v - scaled gradient vector, v[i]=g[i]*s[i]
                * g - gradient
                * s - scaling coefficients set by MinCGSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                is satisfied.
    EpsX    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinCGSetScale()
    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                iterations is unlimited.

Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).
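To illustrate, given a mincgstate object created as in the MinCGCreate()
comments above, typical settings via the C++ wrapper mincgsetcond() look
like this (tolerance values are arbitrary):

    mincgsetcond(state, 1.0e-6, 0, 0, 0);   // stop when scaled gradient norm <= 1.0E-6
    mincgsetcond(state, 0, 0, 0, 100);      // or: run for at most 100 iterations
    mincgsetcond(state, 0, 0, 0, 0);        // or: all zeros - automatic selection (small EpsX)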
  -- ALGLIB --
     Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void mincgsetcond(mincgstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxits,
     ae_state *_state)
{
    ae_assert(ae_isfinite(epsg, _state), "MinCGSetCond: EpsG is not finite number!", _state);
    ae_assert(ae_fp_greater_eq(epsg,(double)(0)), "MinCGSetCond: negative EpsG!", _state);
    ae_assert(ae_isfinite(epsf, _state), "MinCGSetCond: EpsF is not finite number!", _state);
    ae_assert(ae_fp_greater_eq(epsf,(double)(0)), "MinCGSetCond: negative EpsF!", _state);
    ae_assert(ae_isfinite(epsx, _state), "MinCGSetCond: EpsX is not finite number!", _state);
    ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinCGSetCond: negative EpsX!", _state);
    ae_assert(maxits>=0, "MinCGSetCond: negative MaxIts!", _state);
    if( ((ae_fp_eq(epsg,(double)(0))&&ae_fp_eq(epsf,(double)(0)))&&ae_fp_eq(epsx,(double)(0)))&&maxits==0 )
    {
        epsx = 1.0E-6;
    }
    state->epsg = epsg;
    state->epsf = epsf;
    state->epsx = epsx;
    state->maxits = maxits;
}


/*************************************************************************
This function sets scaling coefficients for CG optimizer.

ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the
   function

Scaling is also used by finite difference variant of CG optimizer - step
along I-th axis is equal to DiffStep*S[I].

In most optimizers (and in the CG too) scaling is NOT a form of
preconditioning. It just affects stopping conditions. You should set
preconditioner by separate call to one of the MinCGSetPrec...()
functions.

There is a special preconditioning mode, however, which uses scaling
coefficients to form diagonal preconditioning matrix. You can turn this
mode on, if you want. But you should understand that scaling is not the
same thing as preconditioning - these are two different, although
related, forms of tuning the solver.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    S       -   array[N], non-zero scaling coefficients
                S[i] may be negative, sign doesn't matter.

  -- ALGLIB --
     Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/
void mincgsetscale(mincgstate* state,
     /* Real */ ae_vector* s,
     ae_state *_state)
{
    ae_int_t i;

    ae_assert(s->cnt>=state->n, "MinCGSetScale: Length(S)<N", _state);
    for(i=0; i<=state->n-1; i++)
    {
        ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinCGSetScale: S contains infinite or NAN elements", _state);
        ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinCGSetScale: S contains zero elements", _state);
        state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state);
    }
}


/*************************************************************************
This function turns on/off reporting.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    NeedXRep-   whether iteration reports are needed or not

If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinCGOptimize().

  -- ALGLIB --
     Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state)
{
    state->xrep = needxrep;
}


/*************************************************************************
This function turns on/off line search reports.
These reports are described in more details in developer-only comments on MinCGState object. INPUT PARAMETERS: State - structure which stores algorithm state NeedDRep- whether line search reports are needed or not This function is intended for private use only. Turning it on artificially may cause program failure. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state) { state->drep = needdrep; } /************************************************************************* This function sets CG algorithm. INPUT PARAMETERS: State - structure which stores algorithm state CGType - algorithm type: * -1 automatic selection of the best algorithm * 0 DY (Dai and Yuan) algorithm * 1 Hybrid DY-HS algorithm -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state) { ae_assert(cgtype>=-1&&cgtype<=1, "MinCGSetCGType: incorrect CGType!", _state); if( cgtype==-1 ) { cgtype = 1; } state->cgtype = cgtype; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinCGSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinCGSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* This function allows to suggest initial step length to the CG algorithm. Suggested step length is used as starting point for the line search. It can be useful when you have badly scaled problem, i.e. when ||grad|| (which is used as initial estimate for the first step) is many orders of magnitude different from the desired step. Line search may fail on such problems without good estimate of initial step length. Imagine, for example, problem with ||grad||=10^50 and desired step equal to 0.1 Line search function will use 10^50 as initial step, then it will decrease step length by 2 (up to 20 attempts) and will get 10^44, which is still too large. This function allows us to tell than line search should be started from some moderate step length, like 1.0, so algorithm will be able to detect desired step length in a several searches. Default behavior (when no step is suggested) is to use preconditioner, if it is available, to generate initial estimate of step length. This function influences only first iteration of algorithm. It should be called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. Suggested step is ignored if you have preconditioner. INPUT PARAMETERS: State - structure used to store algorithm state. 
Stp - initial estimate of the step length. Can be zero (no estimate). -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state) { ae_assert(ae_isfinite(stp, _state), "MinCGSuggestStep: Stp is infinite or NAN", _state); ae_assert(ae_fp_greater_eq(stp,(double)(0)), "MinCGSuggestStep: Stp<0", _state); state->suggestedstep = stp; } /************************************************************************* This developer-only function allows to retrieve unscaled (!) length of last good step (i.e. step which resulted in sufficient decrease of target function). It can be used in for solution of sequential optimization subproblems, where MinCGSuggestStep() is called with length of previous step as parameter. INPUT PARAMETERS: State - structure used to store algorithm state. RESULT: length of last good step being accepted NOTE: result of this function is undefined if you called it before -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ double mincglastgoodstep(mincgstate* state, ae_state *_state) { double result; result = state->lastgoodstep; return result; } /************************************************************************* Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecdefault(mincgstate* state, ae_state *_state) { state->prectype = 0; state->innerresetneeded = ae_true; } /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is used. INPUT PARAMETERS: State - structure which stores algorithm state D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). NOTE: you can change preconditioner "on the fly", during algorithm iterations. NOTE 2: D[i] should be positive. Exception will be thrown otherwise. NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecdiag(mincgstate* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; ae_assert(d->cnt>=state->n, "MinCGSetPrecDiag: D is too short", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinCGSetPrecDiag: D contains infinite or NAN elements", _state); ae_assert(ae_fp_greater(d->ptr.p_double[i],(double)(0)), "MinCGSetPrecDiag: D contains non-positive elements", _state); } mincgsetprecdiagfast(state, d, _state); } /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. This preconditioning mode can be useful when you don't have approximate diagonal of Hessian, but you know that your variables are badly scaled (for example, one variable is in [1,10], and another in [1000,100000]), and most part of the ill-conditioning comes from different scales of vars. In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. 
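A short sketch of this mode, assuming the C++ wrappers and an already
created mincgstate object (the scale values are illustrative):

    real_1d_array s = "[1.0,10000.0]";   // typical magnitudes of the two variables
    mincgsetscale(state, s);             // report variable scales to the optimizer
    mincgsetprecscale(state);            // diagonal preconditioner H[i]=1/(s[i]^2)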
IMPRTANT: you should set scale of your variables with MinCGSetScale() call (before or after MinCGSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. INPUT PARAMETERS: State - structure which stores algorithm state NOTE: you can change preconditioner "on the fly", during algorithm iterations. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecscale(mincgstate* state, ae_state *_state) { state->prectype = 3; state->innerresetneeded = ae_true; } /************************************************************************* NOTES: 1. This function has two different implementations: one which uses exact (analytical) user-supplied gradient, and one which uses function value only and numerically differentiates function in order to obtain gradient. Depending on the specific function used to create optimizer object (either MinCGCreate() for analytical gradient or MinCGCreateF() for numerical differentiation) you should choose appropriate variant of MinCGOptimize() - one which accepts function AND gradient or one which accepts function ONLY. Be careful to choose variant of MinCGOptimize() which corresponds to your optimization scheme! Table below lists different combinations of callback (function/gradient) passed to MinCGOptimize() and specific function used to create optimizer. | USER PASSED TO MinCGOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ MinCGCreateF() | work FAIL MinCGCreate() | FAIL work Here "FAIL" denotes inappropriate combinations of optimizer creation function and MinCGOptimize() version. Attemps to use such combination (for example, to create optimizer with MinCGCreateF() and to pass gradient information to MinCGOptimize()) will lead to exception being thrown. Either you did not pass gradient when it WAS needed or you passed gradient when it was NOT needed. -- ALGLIB -- Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ ae_bool mincgiteration(mincgstate* state, ae_state *_state) { ae_int_t n; ae_int_t i; double betak; double v; double vv; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; i = state->rstate.ia.ptr.p_int[1]; betak = state->rstate.ra.ptr.p_double[0]; v = state->rstate.ra.ptr.p_double[1]; vv = state->rstate.ra.ptr.p_double[2]; } else { n = 359; i = -58; betak = -919; v = -909; vv = 81; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } if( state->rstate.stage==15 ) { goto lbl_15; } if( state->rstate.stage==16 ) { goto lbl_16; } if( state->rstate.stage==17 ) { goto lbl_17; } /* * Routine body */ /* * Prepare */ n = state->n; state->terminationneeded = ae_false; state->userterminationneeded = ae_false; state->repterminationtype = 0; state->repiterationscount = 0; state->repnfev = 0; state->debugrestartscount = 0; smoothnessmonitorinit(&state->smonitor, n, 1, state->smoothnessguardlevel>0, _state); rvectorsetlengthatleast(&state->invs, n, _state); for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; state->invs.ptr.p_double[i] = 1/state->s.ptr.p_double[i]; } /* * Check, that transferred derivative value is right */ mincg_clearrequestfields(state, _state); if( !(ae_fp_eq(state->diffstep,(double)(0))&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_18; } lbl_20: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xbase, &state->s, &state->s, &state->s, ae_false, state->teststep, _state) ) { goto lbl_21; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfg = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfg = ae_false; state->smonitor.fi.ptr.p_double[0] = state->f; for(i=0; i<=n-1; i++) { state->smonitor.j.ptr.pp_double[0][i] = state->g.ptr.p_double[i]; } goto lbl_20; lbl_21: lbl_18: /* * Preparations continue: * * set XK * * calculate F/G * * set DK to -G * * powerup algo (it may change preconditioner) * * apply preconditioner to DK * * report update of X * * check stopping conditions for G */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->xbase.ptr.p_double[i]; } ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); mincg_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_22; } state->needfg = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needfg = ae_false; goto lbl_23; lbl_22: state->needf = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->fbase = state->f; i = 0; lbl_24: if( i>n-1 ) { goto lbl_26; } v = state->x.ptr.p_double[i]; state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 
state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 5; goto lbl_rcomm; lbl_5: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->fp2 = state->f; state->x.ptr.p_double[i] = v; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); i = i+1; goto lbl_24; lbl_26: state->f = state->fbase; state->needf = ae_false; lbl_23: if( !state->drep ) { goto lbl_27; } /* * Report algorithm powerup (if needed) */ mincg_clearrequestfields(state, _state); state->algpowerup = ae_true; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->algpowerup = ae_false; lbl_27: trimprepare(state->f, &state->trimthreshold, _state); ae_v_moveneg(&state->dk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); mincg_preconditionedmultiply(state, &state->dk, &state->work0, &state->work1, _state); if( !state->xrep ) { goto lbl_29; } mincg_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->xupdated = ae_false; lbl_29: if( state->terminationneeded||state->userterminationneeded ) { /* * Combined termination point for "internal" termination by TerminationNeeded flag * and for "user" termination by MinCGRequestTermination() (UserTerminationNeeded flag). * In this location rules for both of methods are same, thus only one exit point is needed. */ ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) { ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 4; result = ae_false; return result; } state->repnfev = 1; state->k = 0; state->fold = state->f; /* * Choose initial step. * Apply preconditioner, if we have something other than default. 
*/ if( state->prectype==2||state->prectype==3 ) { /* * because we use preconditioner, step length must be equal * to the norm of DK */ v = ae_v_dotproduct(&state->dk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->lastgoodstep = ae_sqrt(v, _state); } else { /* * No preconditioner is used, we try to use suggested step */ if( ae_fp_greater(state->suggestedstep,(double)(0)) ) { state->lastgoodstep = state->suggestedstep; } else { state->lastgoodstep = 1.0; } } /* * Main cycle */ state->rstimer = mincg_rscountdownlen; lbl_31: if( ae_false ) { goto lbl_32; } /* * * clear reset flag * * clear termination flag * * store G[k] for later calculation of Y[k] * * prepare starting point and direction and step length for line search */ state->innerresetneeded = ae_false; state->terminationneeded = ae_false; ae_v_moveneg(&state->yk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->d.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->mcstage = 0; state->stp = 1.0; linminnormalized(&state->d, &state->stp, n, _state); if( ae_fp_neq(state->lastgoodstep,(double)(0)) ) { state->stp = state->lastgoodstep; } state->curstpmax = state->stpmax; /* * Report beginning of line search (if needed) * Terminate algorithm, if user request was detected */ if( !state->drep ) { goto lbl_33; } mincg_clearrequestfields(state, _state); state->lsstart = ae_true; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->lsstart = ae_false; lbl_33: if( state->terminationneeded ) { ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } /* * Minimization along D */ smoothnessmonitorstartlinesearch1u(&state->smonitor, &state->s, &state->invs, &state->x, state->f, &state->g, _state); mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->curstpmax, mincg_gtol, &state->mcinfo, &state->nfev, &state->work0, &state->lstate, &state->mcstage, _state); lbl_35: if( state->mcstage==0 ) { goto lbl_36; } /* * Calculate function/gradient using either * analytical gradient supplied by user * or finite difference approximation. * * "Trim" function in order to handle near-singularity points. 
*/ mincg_clearrequestfields(state, _state); if( ae_fp_neq(state->diffstep,(double)(0)) ) { goto lbl_37; } state->needfg = ae_true; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->needfg = ae_false; goto lbl_38; lbl_37: state->needf = ae_true; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->fbase = state->f; i = 0; lbl_39: if( i>n-1 ) { goto lbl_41; } v = state->x.ptr.p_double[i]; state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->fm2 = state->f; state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->fm1 = state->f; state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->fp1 = state->f; state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; state->rstate.stage = 15; goto lbl_rcomm; lbl_15: state->fp2 = state->f; state->x.ptr.p_double[i] = v; state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); i = i+1; goto lbl_39; lbl_41: state->f = state->fbase; state->needf = ae_false; lbl_38: smoothnessmonitorenqueuepoint1u(&state->smonitor, &state->s, &state->invs, &state->d, state->stp, &state->x, state->f, &state->g, _state); trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); /* * Call MCSRCH again */ mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->curstpmax, mincg_gtol, &state->mcinfo, &state->nfev, &state->work0, &state->lstate, &state->mcstage, _state); goto lbl_35; lbl_36: smoothnessmonitorfinalizelinesearch(&state->smonitor, _state); /* * * terminate algorithm if "user" request for detected * * report end of line search * * store current point to XN * * report iteration * * terminate algorithm if "internal" request was detected */ if( state->userterminationneeded ) { ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } if( !state->drep ) { goto lbl_42; } /* * Report end of line search (if needed) */ mincg_clearrequestfields(state, _state); state->lsend = ae_true; state->rstate.stage = 16; goto lbl_rcomm; lbl_16: state->lsend = ae_false; lbl_42: ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !state->xrep ) { goto lbl_44; } mincg_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 17; goto lbl_rcomm; lbl_17: state->xupdated = ae_false; lbl_44: if( state->terminationneeded ) { ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } /* * Line search is finished. * * calculate BetaK * * calculate DN * * update timers * * calculate step length: * * LastScaledStep is ALWAYS calculated because it is used in the stopping criteria * * LastGoodStep is updated only when MCINFO is equal to 1 (Wolfe conditions hold). * See below for more explanation. 
*/ if( state->mcinfo==1&&!state->innerresetneeded ) { /* * Standard Wolfe conditions hold * Calculate Y[K] and D[K]'*Y[K] */ ae_v_add(&state->yk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); vv = ae_v_dotproduct(&state->yk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); /* * Calculate BetaK according to DY formula */ v = mincg_preconditionedmultiply2(state, &state->g, &state->g, &state->work0, &state->work1, _state); state->betady = v/vv; /* * Calculate BetaK according to HS formula */ v = mincg_preconditionedmultiply2(state, &state->g, &state->yk, &state->work0, &state->work1, _state); state->betahs = v/vv; /* * Choose BetaK */ if( state->cgtype==0 ) { betak = state->betady; } if( state->cgtype==1 ) { betak = ae_maxreal((double)(0), ae_minreal(state->betady, state->betahs, _state), _state); } } else { /* * Something is wrong (may be function is too wild or too flat) * or we just have to restart algo. * * We'll set BetaK=0, which will restart CG algorithm. * We can stop later (during normal checks) if stopping conditions are met. */ betak = (double)(0); state->debugrestartscount = state->debugrestartscount+1; } if( state->repiterationscount>0&&state->repiterationscount%(3+n)==0 ) { /* * clear Beta every N iterations */ betak = (double)(0); } if( state->mcinfo==1||state->mcinfo==5 ) { state->rstimer = mincg_rscountdownlen; } else { state->rstimer = state->rstimer-1; } ae_v_moveneg(&state->dn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); mincg_preconditionedmultiply(state, &state->dn, &state->work0, &state->work1, _state); ae_v_addd(&state->dn.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1), betak); state->lastscaledstep = 0.0; for(i=0; i<=n-1; i++) { state->lastscaledstep = state->lastscaledstep+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } state->lastscaledstep = state->stp*ae_sqrt(state->lastscaledstep, _state); if( state->mcinfo==1 ) { /* * Step is good (Wolfe conditions hold), update LastGoodStep. * * This check for MCINFO=1 is essential because sometimes in the * constrained optimization setting we may take very short steps * (like 1E-15) because we were very close to boundary of the * feasible area. Such short step does not mean that we've converged * to the solution - it was so short because we were close to the * boundary and there was a limit on step length. * * So having such short step is quite normal situation. However, we * should NOT start next iteration from step whose initial length is * estimated as 1E-15 because it may lead to the failure of the * linear minimizer (step is too short, function does not changes, * line search stagnates). */ state->lastgoodstep = (double)(0); for(i=0; i<=n-1; i++) { state->lastgoodstep = state->lastgoodstep+ae_sqr(state->d.ptr.p_double[i], _state); } state->lastgoodstep = state->stp*ae_sqrt(state->lastgoodstep, _state); } /* * Update information. * Check stopping conditions. 
*/ v = (double)(0); for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); } if( !ae_isfinite(v, _state)||!ae_isfinite(state->f, _state) ) { /* * Abnormal termination - infinities in function/gradient */ state->repterminationtype = -8; result = ae_false; return result; } state->repnfev = state->repnfev+state->nfev; state->repiterationscount = state->repiterationscount+1; if( state->repiterationscount>=state->maxits&&state->maxits>0 ) { /* * Too many iterations */ state->repterminationtype = 5; result = ae_false; return result; } if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) { /* * Gradient is small enough */ state->repterminationtype = 4; result = ae_false; return result; } if( !state->innerresetneeded ) { /* * These conditions are checked only when no inner reset was requested by user */ if( ae_fp_less_eq(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) { /* * F(k+1)-F(k) is small enough */ state->repterminationtype = 1; result = ae_false; return result; } if( ae_fp_less_eq(state->lastscaledstep,state->epsx) ) { /* * X(k+1)-X(k) is small enough */ state->repterminationtype = 2; result = ae_false; return result; } } if( state->rstimer<=0 ) { /* * Too many subsequent restarts */ state->repterminationtype = 7; result = ae_false; return result; } /* * Shift Xk/Dk, update other information */ ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_move(&state->dk.ptr.p_double[0], 1, &state->dn.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fold = state->f; state->k = state->k+1; goto lbl_31; lbl_32: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = i; state->rstate.ra.ptr.p_double[0] = betak; state->rstate.ra.ptr.p_double[1] = v; state->rstate.ra.ptr.p_double[2] = vv; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic gradient. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function at the initial point (note: future versions may also perform check at the final point) and compares numerical gradient with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both gradients and specific components highlighted as suspicious by the OptGuard. The primary OptGuard report can be retrieved with mincgoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. 
Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with mincgsetscale(). === EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void mincgoptguardgradient(mincgstate* state, double teststep, ae_state *_state) { ae_assert(ae_isfinite(teststep, _state), "MinCGOptGuardGradient: TestStep contains NaN or INF", _state); ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinCGOptGuardGradient: invalid argument TestStep(TestStep<0)", _state); state->teststep = teststep; } /************************************************************************* This function activates/deactivates nonsmoothness monitoring option of the OptGuard integrity checker. Smoothness monitor silently observes solution process and tries to detect ill-posed problems, i.e. ones with: a) discontinuous target function (non-C0) b) nonsmooth target function (non-C1) Smoothness monitoring does NOT interrupt optimization even if it suspects that your problem is nonsmooth. It just sets corresponding flags in the OptGuard report which can be retrieved after optimization is over. Smoothness monitoring is a moderate overhead option which often adds less than 1% to the optimizer running time. Thus, you can use it even for large scale problems. NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 continuity violations. First, minor errors are hard to catch - say, a 0.0001 difference in the model values at two sides of the gap may be due to discontinuity of the model - or simply because the model has changed. Second, C1-violations are especially difficult to detect in a noninvasive way. The optimizer usually performs very short steps near the nonsmoothness, and differentiation usually introduces a lot of numerical noise. It is hard to tell whether some tiny discontinuity in the slope is due to real nonsmoothness or just due to numerical noise alone. Our top priority was to avoid false positives, so in some rare cases minor errors may went unnoticed (however, in most cases they can be spotted with restart from different initial point). INPUT PARAMETERS: state - algorithm state level - monitoring level: * 0 - monitoring is disabled * 1 - noninvasive low-overhead monitoring; function values and/or gradients are recorded, but OptGuard does not try to perform additional evaluations in order to get more information about suspicious locations. === EXPLANATION ========================================================== One major source of headache during optimization is the possibility of the coding errors in the target function/constraints (or their gradients). Such errors most often manifest themselves as discontinuity or nonsmoothness of the target/constraints. Another frequent situation is when you try to optimize something involving lots of min() and max() operations, i.e. nonsmooth target. 
Although not a coding error, it is nonsmoothness anyway - and smooth optimizers usually stop right after encountering nonsmoothness, well before reaching solution. OptGuard integrity checker helps you to catch such situations: it monitors function values/gradients being passed to the optimizer and tries to errors. Upon discovering suspicious pair of points it raises appropriate flag (and allows you to continue optimization). When optimization is done, you can study OptGuard result. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardsmoothness(mincgstate* state, ae_int_t level, ae_state *_state) { ae_assert(level==0||level==1, "MinCGOptGuardSmoothness: unexpected value of level parameter", _state); state->smoothnessguardlevel = level; } /************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over. === PRIMARY REPORT ======================================================= OptGuard performs several checks which are intended to catch common errors in the implementation of nonlinear function/gradient: * incorrect analytic gradient * discontinuous (non-C0) target functions (constraints) * nonsmooth (non-C1) target functions (constraints) Each of these checks is activated with appropriate function: * mincgoptguardgradient() for gradient verification * mincgoptguardsmoothness() for C0/C1 checks Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradvidx for specific variable (gradient element) suspected * rep.badgradxbase, a point where gradient is tested * rep.badgraduser, user-provided gradient (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.badgradnum, reference gradient obtained via numerical differentiation (stored as 2D matrix with single row in order to make report structure compatible with more complex optimizers like MinNLC or MinLM) * rep.nonc0suspected * rep.nonc1suspected === ADDITIONAL REPORTS/LOGS ============================================== Several different tests are performed to catch C0/C1 errors, you can find out specific test signaled error by looking to: * rep.nonc0test0positive, for non-C0 test #0 * rep.nonc1test0positive, for non-C1 test #0 * rep.nonc1test1positive, for non-C1 test #1 Additional information (including line search logs) can be obtained by means of: * mincgoptguardnonc1test0results() * mincgoptguardnonc1test1results() which return detailed error reports, specific points where discontinuities were found, and so on. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - generic OptGuard report; more detailed reports can be retrieved with other functions. NOTE: false negatives (nonsmooth problems are not identified as nonsmooth ones) are possible although unlikely. The reason is that you need to make several evaluations around nonsmoothness in order to accumulate enough information about function curvature. Say, if you start right from the nonsmooth point, optimizer simply won't get enough data to understand what is going wrong before it terminates due to abrupt changes in the derivative. It is also possible that "unlucky" step will move us to the termination too quickly. 
Our current approach is to have less than 0.1% false negatives in our test examples (measured with multiple restarts from random points), and to have exactly 0% false positives. -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardresults(mincgstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #0 Nonsmoothness (non-C1) test #0 studies function values (not gradient!) obtained during line searches and monitors behavior of the directional derivative estimate. This test is less powerful than test #1, but it does not depend on the gradient values and thus it is more robust against artifacts introduced by numerical differentiation. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], f[] - arrays of length CNT which store step lengths and function values at these points; f[i] is evaluated in x0+stp[i]*d. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #0 "strong" report lngrep - C1 test #0 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardnonc1test0results(mincgstate* state, optguardnonc1test0report* strrep, optguardnonc1test0report* lngrep, ae_state *_state) { _optguardnonc1test0report_clear(strrep); _optguardnonc1test0report_clear(lngrep); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test0report(&state->smonitor.nonc1test0lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Detailed results of the OptGuard integrity check for nonsmoothness test #1 Nonsmoothness (non-C1) test #1 studies individual components of the gradient computed during line search. When precise analytic gradient is provided this test is more powerful than test #0 which works with function values and ignores user-provided gradient. However, test #0 becomes more powerful when numerical differentiation is employed (in such cases test #1 detects higher levels of numerical noise and becomes too conservative). 
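A typical retrieval sequence, continuing the usage sketch from the
MinCGCreate() comments (C++ wrappers assumed):

    mincgoptguardsmoothness(state, 1);        // enable C0/C1 monitoring before optimizing
    mincgoptimize(state, demo_grad);
    mincgresults(state, x, rep);
    optguardreport ogrep;
    mincgoptguardresults(state, ogrep);
    if( ogrep.nonc1suspected )
    {
        optguardnonc1test1report strrep, lngrep;
        mincgoptguardnonc1test1results(state, strrep, lngrep);
        // strrep.stp[i] and strrep.g[i] trace the suspicious line search for
        // variable strrep.vidx; plotting them shows where C1 continuity breaks
    }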
This test also tells specific components of the gradient which violate C1 continuity, which makes it more informative than #0, which just tells that continuity is violated. Two reports are returned: * a "strongest" one, corresponding to line search which had highest value of the nonsmoothness indicator * a "longest" one, corresponding to line search which had more function evaluations, and thus is more detailed In both cases following fields are returned: * positive - is TRUE when test flagged suspicious point; FALSE if test did not notice anything (in the latter cases fields below are empty). * vidx - is an index of the variable in [0,N) with nonsmooth derivative * x0[], d[] - arrays of length N which store initial point and direction for line search (d[] can be normalized, but does not have to) * stp[], g[] - arrays of length CNT which store step lengths and gradient values at these points; g[i] is evaluated in x0+stp[i]*d and contains vidx-th component of the gradient. * stpidxa, stpidxb - we suspect that function violates C1 continuity between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, with most likely position of the violation between stpidxa+1 and stpidxa+2. ========================================================================== = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will = see where C1 continuity is violated. ========================================================================== INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: strrep - C1 test #1 "strong" report lngrep - C1 test #1 "long" report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void mincgoptguardnonc1test1results(mincgstate* state, optguardnonc1test1report* strrep, optguardnonc1test1report* lngrep, ae_state *_state) { _optguardnonc1test1report_clear(strrep); _optguardnonc1test1report_clear(lngrep); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1strrep, &state->lastscaleused, strrep, _state); smoothnessmonitorexportc1test1report(&state->smonitor.nonc1test1lngrep, &state->lastscaleused, lngrep, _state); } /************************************************************************* Conjugate gradient results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report: * Rep.TerminationType completetion code: * -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. * -7 gradient verification failed. See MinCGSetGradientCheck() for more information. * 1 relative function improvement is no more than EpsF. * 2 relative step is no more than EpsX. * 4 gradient norm is no more than EpsG * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible, we return best X found so far * 8 terminated by user * Rep.IterationsCount contains iterations count * NFEV countains number of function calculations -- ALGLIB -- Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ void mincgresults(mincgstate* state, /* Real */ ae_vector* x, mincgreport* rep, ae_state *_state) { ae_vector_clear(x); _mincgreport_clear(rep); mincgresultsbuf(state, x, rep, _state); } /************************************************************************* Conjugate gradient results Buffered implementation of MinCGResults(), which uses pre-allocated buffer to store X[]. 
If buffer size is too small, it resizes the buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ void mincgresultsbuf(mincgstate* state, /* Real */ ae_vector* x, mincgreport* rep, ae_state *_state) { if( x->cnt<state->n ) { ae_vector_set_length(x, state->n, _state); } ae_v_move(&x->ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); rep->iterationscount = state->repiterationscount; rep->nfev = state->repnfev; rep->terminationtype = state->repterminationtype; } /************************************************************************* This subroutine restarts CG algorithm from new point. All optimization parameters are left unchanged. This function allows solving multiple optimization problems (which must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used to store algorithm state. X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void mincgrestartfrom(mincgstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_assert(x->cnt>=state->n, "MinCGRestartFrom: Length(X)<N!", _state); ae_assert(isfinitevector(x, state->n, _state), "MinCGRestartFrom: X contains infinite or NaN values!", _state); ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); mincgsuggeststep(state, 0.0, _state); ae_vector_set_length(&state->rstate.ia, 1+1, _state); ae_vector_set_length(&state->rstate.ra, 2+1, _state); state->rstate.stage = -1; mincg_clearrequestfields(state, _state); } /************************************************************************* This subroutine submits request for termination of running optimizer. It should be called from user-supplied callback when user decides that it is time to "smoothly" terminate optimization process. As a result, the optimizer stops at the point which was "current accepted" when the termination request was submitted, and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after request for termination optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored. -- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void mincgrequesttermination(mincgstate* state, ae_state *_state) { state->userterminationneeded = ae_true; } /************************************************************************* Faster version of MinCGSetPrecDiag(), for time-critical parts of code, without safety checks.
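Because no checks are performed here, the caller takes over the preconditions that the checked public MinCGSetPrecDiag() enforces; a reasonable reading of the code below (an assumption, not something validated by this function) is that D must have at least N elements and contain strictly positive values, e.g.:

    mincgsetprecdiagfast(&state, &d, _state);   /* caller guarantees d->cnt>=state->n and d[i]>0 */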
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecdiagfast(mincgstate* state, /* Real */ ae_vector* d, ae_state *_state) { ae_int_t i; rvectorsetlengthatleast(&state->diagh, state->n, _state); rvectorsetlengthatleast(&state->diaghl2, state->n, _state); state->prectype = 2; state->vcnt = 0; state->innerresetneeded = ae_true; for(i=0; i<=state->n-1; i++) { state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; state->diaghl2.ptr.p_double[i] = 0.0; } } /************************************************************************* This function sets low-rank preconditioner for Hessian matrix H=D+V'*C*V, where: * H is a Hessian matrix, which is approximated by D/V/C * D=D1+D2 is a diagonal matrix, which includes two positive definite terms: * constant term D1 (is not updated or infrequently updated) * variable term D2 (can be cheaply updated from iteration to iteration) * V is a low-rank correction * C is a diagonal factor of low-rank correction Preconditioner P is calculated using approximate Woodburry formula: P = D^(-1) - D^(-1)*V'*(C^(-1)+V*D1^(-1)*V')^(-1)*V*D^(-1) = D^(-1) - D^(-1)*VC'*VC*D^(-1), where VC = sqrt(B)*V B = (C^(-1)+V*D1^(-1)*V')^(-1) Note that B is calculated using constant term (D1) only, which allows us to update D2 without recalculation of B or VC. Such preconditioner is exact when D2 is zero. When D2 is non-zero, it is only approximation, but very good and cheap one. This function accepts D1, V, C. D2 is set to zero by default. Cost of this update is O(N*VCnt*VCnt), but D2 can be updated in just O(N) by MinCGSetPrecVarPart. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetpreclowrankfast(mincgstate* state, /* Real */ ae_vector* d1, /* Real */ ae_vector* c, /* Real */ ae_matrix* v, ae_int_t vcnt, ae_state *_state) { ae_frame _frame_block; ae_int_t i; ae_int_t j; ae_int_t k; ae_int_t n; double t; ae_matrix b; ae_frame_make(_state, &_frame_block); memset(&b, 0, sizeof(b)); ae_matrix_init(&b, 0, 0, DT_REAL, _state, ae_true); if( vcnt==0 ) { mincgsetprecdiagfast(state, d1, _state); ae_frame_leave(_state); return; } n = state->n; ae_matrix_set_length(&b, vcnt, vcnt, _state); rvectorsetlengthatleast(&state->diagh, n, _state); rvectorsetlengthatleast(&state->diaghl2, n, _state); rmatrixsetlengthatleast(&state->vcorr, vcnt, n, _state); state->prectype = 2; state->vcnt = vcnt; state->innerresetneeded = ae_true; for(i=0; i<=n-1; i++) { state->diagh.ptr.p_double[i] = d1->ptr.p_double[i]; state->diaghl2.ptr.p_double[i] = 0.0; } for(i=0; i<=vcnt-1; i++) { for(j=i; j<=vcnt-1; j++) { t = (double)(0); for(k=0; k<=n-1; k++) { t = t+v->ptr.pp_double[i][k]*v->ptr.pp_double[j][k]/d1->ptr.p_double[k]; } b.ptr.pp_double[i][j] = t; } b.ptr.pp_double[i][i] = b.ptr.pp_double[i][i]+1.0/c->ptr.p_double[i]; } if( !spdmatrixcholeskyrec(&b, 0, vcnt, ae_true, &state->work0, _state) ) { state->vcnt = 0; ae_frame_leave(_state); return; } for(i=0; i<=vcnt-1; i++) { ae_v_move(&state->vcorr.ptr.pp_double[i][0], 1, &v->ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); for(j=0; j<=i-1; j++) { t = b.ptr.pp_double[j][i]; ae_v_subd(&state->vcorr.ptr.pp_double[i][0], 1, &state->vcorr.ptr.pp_double[j][0], 1, ae_v_len(0,n-1), t); } t = 1/b.ptr.pp_double[i][i]; ae_v_muld(&state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), t); } ae_frame_leave(_state); } /************************************************************************* This function 
updates variable part (diagonal matrix D2) of low-rank preconditioner. This update is very cheap and takes just O(N) time. It has no effect with default preconditioner. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ void mincgsetprecvarpart(mincgstate* state, /* Real */ ae_vector* d2, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; for(i=0; i<=n-1; i++) { state->diaghl2.ptr.p_double[i] = d2->ptr.p_double[i]; } } /************************************************************************* Clears request fileds (to be sure that we don't forgot to clear something) *************************************************************************/ static void mincg_clearrequestfields(mincgstate* state, ae_state *_state) { state->needf = ae_false; state->needfg = ae_false; state->xupdated = ae_false; state->lsstart = ae_false; state->lsend = ae_false; state->algpowerup = ae_false; } /************************************************************************* This function calculates preconditioned product H^(-1)*x and stores result back into X. Work0[] and Work1[] are used as temporaries (size must be at least N; this function doesn't allocate arrays). -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ static void mincg_preconditionedmultiply(mincgstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* work0, /* Real */ ae_vector* work1, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t vcnt; double v; n = state->n; vcnt = state->vcnt; if( state->prectype==0 ) { return; } if( state->prectype==3 ) { for(i=0; i<=n-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; } return; } ae_assert(state->prectype==2, "MinCG: internal error (unexpected PrecType)", _state); /* * handle part common for VCnt=0 and VCnt<>0 */ for(i=0; i<=n-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); } /* * if VCnt>0 */ if( vcnt>0 ) { for(i=0; i<=vcnt-1; i++) { v = ae_v_dotproduct(&state->vcorr.ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); work0->ptr.p_double[i] = v; } for(i=0; i<=n-1; i++) { work1->ptr.p_double[i] = (double)(0); } for(i=0; i<=vcnt-1; i++) { v = work0->ptr.p_double[i]; ae_v_addd(&state->work1.ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); } for(i=0; i<=n-1; i++) { x->ptr.p_double[i] = x->ptr.p_double[i]-state->work1.ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); } } } /************************************************************************* This function calculates preconditioned product x'*H^(-1)*y. Work0[] and Work1[] are used as temporaries (size must be at least N; this function doesn't allocate arrays). 
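In terms of the low-rank representation described above (see MinCGSetPrecLowRankFast), the quantity computed here can be written as

    x'*H^(-1)*y  =  x'*D^(-1)*y - sum_i (vc_i'*D^(-1)*x)*(vc_i'*D^(-1)*y),

where D = DiagH+DiagHL2 and vc_i is the I-th row of VCorr; the first term is the diagonal part, and the subtracted sum is the low-rank correction accumulated over the VCnt rows.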
-- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ static double mincg_preconditionedmultiply2(mincgstate* state, /* Real */ ae_vector* x, /* Real */ ae_vector* y, /* Real */ ae_vector* work0, /* Real */ ae_vector* work1, ae_state *_state) { ae_int_t i; ae_int_t n; ae_int_t vcnt; double v0; double v1; double result; n = state->n; vcnt = state->vcnt; /* * no preconditioning */ if( state->prectype==0 ) { v0 = ae_v_dotproduct(&x->ptr.p_double[0], 1, &y->ptr.p_double[0], 1, ae_v_len(0,n-1)); result = v0; return result; } if( state->prectype==3 ) { result = (double)(0); for(i=0; i<=n-1; i++) { result = result+x->ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]*y->ptr.p_double[i]; } return result; } ae_assert(state->prectype==2, "MinCG: internal error (unexpected PrecType)", _state); /* * low rank preconditioning */ result = 0.0; for(i=0; i<=n-1; i++) { result = result+x->ptr.p_double[i]*y->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); } if( vcnt>0 ) { for(i=0; i<=n-1; i++) { work0->ptr.p_double[i] = x->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); work1->ptr.p_double[i] = y->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); } for(i=0; i<=vcnt-1; i++) { v0 = ae_v_dotproduct(&work0->ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); v1 = ae_v_dotproduct(&work1->ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); result = result-v0*v1; } } return result; } /************************************************************************* Internal initialization subroutine -- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ static void mincg_mincginitinternal(ae_int_t n, double diffstep, mincgstate* state, ae_state *_state) { ae_int_t i; /* * Initialize */ state->teststep = (double)(0); state->smoothnessguardlevel = 0; smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state); state->n = n; state->diffstep = diffstep; state->lastgoodstep = (double)(0); mincgsetcond(state, (double)(0), (double)(0), (double)(0), 0, _state); mincgsetxrep(state, ae_false, _state); mincgsetdrep(state, ae_false, _state); mincgsetstpmax(state, (double)(0), _state); mincgsetcgtype(state, -1, _state); mincgsetprecdefault(state, _state); ae_vector_set_length(&state->xk, n, _state); ae_vector_set_length(&state->dk, n, _state); ae_vector_set_length(&state->xn, n, _state); ae_vector_set_length(&state->dn, n, _state); ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->d, n, _state); ae_vector_set_length(&state->g, n, _state); ae_vector_set_length(&state->work0, n, _state); ae_vector_set_length(&state->work1, n, _state); ae_vector_set_length(&state->yk, n, _state); ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->invs, n, _state); ae_vector_set_length(&state->lastscaleused, n, _state); rvectorsetlengthatleast(&state->xbase, n, _state); for(i=0; i<=n-1; i++) { state->s.ptr.p_double[i] = 1.0; state->invs.ptr.p_double[i] = 1.0; state->lastscaleused.ptr.p_double[i] = 1.0; } } void _mincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { mincgstate *p = (mincgstate*)_p; ae_touch_ptr((void*)p); ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->diaghl2, 0, DT_REAL, _state, make_automatic); ae_matrix_init(&p->vcorr, 0, 0, DT_REAL, _state, 
make_automatic); ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->dn, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); _rcommstate_init(&p->rstate, _state, make_automatic); _linminstate_init(&p->lstate, _state, make_automatic); ae_vector_init(&p->work0, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->work1, 0, DT_REAL, _state, make_automatic); ae_vector_init(&p->invs, 0, DT_REAL, _state, make_automatic); _smoothnessmonitor_init(&p->smonitor, _state, make_automatic); ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic); } void _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { mincgstate *dst = (mincgstate*)_dst; mincgstate *src = (mincgstate*)_src; dst->n = src->n; dst->epsg = src->epsg; dst->epsf = src->epsf; dst->epsx = src->epsx; dst->maxits = src->maxits; dst->stpmax = src->stpmax; dst->suggestedstep = src->suggestedstep; dst->xrep = src->xrep; dst->drep = src->drep; dst->cgtype = src->cgtype; dst->prectype = src->prectype; ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic); ae_vector_init_copy(&dst->diaghl2, &src->diaghl2, _state, make_automatic); ae_matrix_init_copy(&dst->vcorr, &src->vcorr, _state, make_automatic); dst->vcnt = src->vcnt; ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); dst->diffstep = src->diffstep; dst->nfev = src->nfev; dst->mcstage = src->mcstage; dst->k = src->k; ae_vector_init_copy(&dst->xk, &src->xk, _state, make_automatic); ae_vector_init_copy(&dst->dk, &src->dk, _state, make_automatic); ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic); ae_vector_init_copy(&dst->dn, &src->dn, _state, make_automatic); ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic); dst->fold = src->fold; dst->stp = src->stp; dst->curstpmax = src->curstpmax; ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic); dst->lastgoodstep = src->lastgoodstep; dst->lastscaledstep = src->lastscaledstep; dst->mcinfo = src->mcinfo; dst->innerresetneeded = src->innerresetneeded; dst->terminationneeded = src->terminationneeded; dst->trimthreshold = src->trimthreshold; ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); dst->rstimer = src->rstimer; ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); dst->f = src->f; ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); dst->needf = src->needf; dst->needfg = src->needfg; dst->xupdated = src->xupdated; dst->algpowerup = src->algpowerup; dst->lsstart = src->lsstart; dst->lsend = src->lsend; dst->userterminationneeded = src->userterminationneeded; _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); dst->repiterationscount = src->repiterationscount; dst->repnfev = src->repnfev; dst->repterminationtype = src->repterminationtype; dst->debugrestartscount = src->debugrestartscount; _linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic); dst->fbase = src->fbase; dst->fm2 = src->fm2; dst->fm1 = src->fm1; dst->fp1 = src->fp1; dst->fp2 = src->fp2; dst->betahs = 
src->betahs; dst->betady = src->betady; ae_vector_init_copy(&dst->work0, &src->work0, _state, make_automatic); ae_vector_init_copy(&dst->work1, &src->work1, _state, make_automatic); ae_vector_init_copy(&dst->invs, &src->invs, _state, make_automatic); dst->teststep = src->teststep; dst->smoothnessguardlevel = src->smoothnessguardlevel; _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic); ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic); } void _mincgstate_clear(void* _p) { mincgstate *p = (mincgstate*)_p; ae_touch_ptr((void*)p); ae_vector_clear(&p->diagh); ae_vector_clear(&p->diaghl2); ae_matrix_clear(&p->vcorr); ae_vector_clear(&p->s); ae_vector_clear(&p->xk); ae_vector_clear(&p->dk); ae_vector_clear(&p->xn); ae_vector_clear(&p->dn); ae_vector_clear(&p->d); ae_vector_clear(&p->yk); ae_vector_clear(&p->xbase); ae_vector_clear(&p->x); ae_vector_clear(&p->g); _rcommstate_clear(&p->rstate); _linminstate_clear(&p->lstate); ae_vector_clear(&p->work0); ae_vector_clear(&p->work1); ae_vector_clear(&p->invs); _smoothnessmonitor_clear(&p->smonitor); ae_vector_clear(&p->lastscaleused); } void _mincgstate_destroy(void* _p) { mincgstate *p = (mincgstate*)_p; ae_touch_ptr((void*)p); ae_vector_destroy(&p->diagh); ae_vector_destroy(&p->diaghl2); ae_matrix_destroy(&p->vcorr); ae_vector_destroy(&p->s); ae_vector_destroy(&p->xk); ae_vector_destroy(&p->dk); ae_vector_destroy(&p->xn); ae_vector_destroy(&p->dn); ae_vector_destroy(&p->d); ae_vector_destroy(&p->yk); ae_vector_destroy(&p->xbase); ae_vector_destroy(&p->x); ae_vector_destroy(&p->g); _rcommstate_destroy(&p->rstate); _linminstate_destroy(&p->lstate); ae_vector_destroy(&p->work0); ae_vector_destroy(&p->work1); ae_vector_destroy(&p->invs); _smoothnessmonitor_destroy(&p->smonitor); ae_vector_destroy(&p->lastscaleused); } void _mincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { mincgreport *p = (mincgreport*)_p; ae_touch_ptr((void*)p); } void _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { mincgreport *dst = (mincgreport*)_dst; mincgreport *src = (mincgreport*)_src; dst->iterationscount = src->iterationscount; dst->nfev = src->nfev; dst->terminationtype = src->terminationtype; } void _mincgreport_clear(void* _p) { mincgreport *p = (mincgreport*)_p; ae_touch_ptr((void*)p); } void _mincgreport_destroy(void* _p) { mincgreport *p = (mincgreport*)_p; ae_touch_ptr((void*)p); } #endif #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD) /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] and Jacobian of f[]. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X * function vector f[] and Jacobian of f[] (simultaneously) at given point There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() and jac() callbacks. First one is used to calculate f[] at given point, second one calculates f[] and Jacobian df[i]/dx[j]. 
You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not provide Jacobian), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateVJ() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5. Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows reusing an already initialized structure. INPUT PARAMETERS: N - dimension, N>=1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatevj(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); ae_assert(n>=1, "MinLMCreateVJ: N<1!", _state); ae_assert(m>=1, "MinLMCreateVJ: M<1!", _state); ae_assert(x->cnt>=n, "MinLMCreateVJ: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLMCreateVJ: X contains infinite or NaN values!", _state); state->teststep = (double)(0); state->n = n; state->m = m; state->algomode = 1; state->hasf = ae_false; state->hasfi = ae_true; state->hasg = ae_false; /* * second stage of initialization */ minlm_lmprepare(n, m, ae_false, state, _state); minlmsetacctype(state, 0, _state); minlmsetcond(state, (double)(0), 0, _state); minlmsetxrep(state, ae_false, _state); minlmsetstpmax(state, (double)(0), _state); minlmrestartfrom(state, x, _state); } /************************************************************************* IMPROVED LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR LEAST SQUARES OPTIMIZATION DESCRIPTION: This function is used to find minimum of function which is represented as sum of squares: F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) using value of function vector f[] only. Finite differences are used to calculate Jacobian. REQUIREMENTS: This algorithm will request following information during its operation: * function vector f[] at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts fvec() callback. You can try to initialize MinLMState structure with VJ function and then use incorrect version of MinLMOptimize() (for example, version which works with general form function and does not accept function vector), but it will lead to exception being thrown after first attempt to calculate Jacobian. USAGE: 1. User initializes algorithm state with MinLMCreateV() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and callback functions. 4. User calls MinLMResults() to get solution 5.
Optionally, user may call MinLMRestartFrom() to solve another problem with same N/M but another starting point and/or another function. MinLMRestartFrom() allows reusing an already initialized structure. INPUT PARAMETERS: N - dimension, N>=1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X M - number of functions f[i] X - initial solution, array[0..N-1] DiffStep- differentiation step, >0 OUTPUT PARAMETERS: State - structure which stores algorithm state See also MinLMIteration, MinLMResults. NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatev(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, double diffstep, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); ae_assert(ae_isfinite(diffstep, _state), "MinLMCreateV: DiffStep is not finite!", _state); ae_assert(ae_fp_greater(diffstep,(double)(0)), "MinLMCreateV: DiffStep<=0!", _state); ae_assert(n>=1, "MinLMCreateV: N<1!", _state); ae_assert(m>=1, "MinLMCreateV: M<1!", _state); ae_assert(x->cnt>=n, "MinLMCreateV: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLMCreateV: X contains infinite or NaN values!", _state); state->teststep = (double)(0); state->n = n; state->m = m; state->algomode = 0; state->hasf = ae_false; state->hasfi = ae_true; state->hasg = ae_false; state->diffstep = diffstep; /* * Second stage of initialization */ minlm_lmprepare(n, m, ae_false, state, _state); minlmsetacctype(state, 1, _state); minlmsetcond(state, (double)(0), 0, _state); minlmsetxrep(state, ae_false, _state); minlmsetstpmax(state, (double)(0), _state); minlmrestartfrom(state, x, _state); } /************************************************************************* LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION DESCRIPTION: This function is used to find minimum of general form (not "sum-of-squares") function F = F(x[0], ..., x[n-1]) using its gradient and Hessian. Levenberg-Marquardt modification with L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization after each Levenberg-Marquardt step is used. REQUIREMENTS: This algorithm will request following information during its operation: * function value F at given point X * F and gradient G (simultaneously) at given point X * F, G and Hessian H (simultaneously) at given point X There are several overloaded versions of MinLMOptimize() function which correspond to different LM-like optimization algorithms provided by this unit. You should choose version which accepts func(), grad() and hess() function pointers. First pointer is used to calculate F at given point, second one calculates F(x) and grad F(x), third one calculates F(x), grad F(x), hess F(x). You can try to initialize MinLMState structure with FGH-function and then use incorrect version of MinLMOptimize() (for example, version which does not provide Hessian matrix), but it will lead to exception being thrown after first attempt to calculate Hessian. USAGE: 1. User initializes algorithm state with MinLMCreateFGH() call 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and other functions 3. User calls MinLMOptimize() function which takes algorithm state and pointers (delegates, etc.) to callback functions. 4. User calls MinLMResults() to get solution 5.
Optionally, user may call MinLMRestartFrom() to solve another problem with same N but another starting point and/or another function. MinLMRestartFrom() allows reusing an already initialized structure. INPUT PARAMETERS: N - dimension, N>=1 * if given, only leading N elements of X are used * if not given, automatically determined from size of X X - initial solution, array[0..N-1] OUTPUT PARAMETERS: State - structure which stores algorithm state NOTES: 1. you may tune stopping conditions with MinLMSetCond() function 2. if target function contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow, use MinLMSetStpMax() function to bound algorithm's steps. -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefgh(ae_int_t n, /* Real */ ae_vector* x, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); ae_assert(n>=1, "MinLMCreateFGH: N<1!", _state); ae_assert(x->cnt>=n, "MinLMCreateFGH: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLMCreateFGH: X contains infinite or NaN values!", _state); state->teststep = (double)(0); state->n = n; state->m = 0; state->algomode = 2; state->hasf = ae_true; state->hasfi = ae_false; state->hasg = ae_true; /* * init2 */ minlm_lmprepare(n, 0, ae_true, state, _state); minlmsetacctype(state, 2, _state); minlmsetcond(state, (double)(0), 0, _state); minlmsetxrep(state, ae_false, _state); minlmsetstpmax(state, (double)(0), _state); minlmrestartfrom(state, x, _state); } /************************************************************************* This function sets stopping conditions for Levenberg-Marquardt optimization algorithm. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidean norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - step vector, dx=X(k+1)-X(k) * s - scaling coefficients set by MinLMSetScale() Recommended values: 1E-9 ... 1E-12. MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Only Levenberg-Marquardt iterations are counted (L-BFGS/CG iterations are NOT counted because their cost is very low compared to that of LM). Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (small EpsX). NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM is a second-order method, it performs very precise steps anyway. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetcond(minlmstate* state, double epsx, ae_int_t maxits, ae_state *_state) { ae_assert(ae_isfinite(epsx, _state), "MinLMSetCond: EpsX is not finite number!", _state); ae_assert(ae_fp_greater_eq(epsx,(double)(0)), "MinLMSetCond: negative EpsX!", _state); ae_assert(maxits>=0, "MinLMSetCond: negative MaxIts!", _state); if( ae_fp_eq(epsx,(double)(0))&&maxits==0 ) { epsx = 1.0E-9; } state->epsx = epsx; state->maxits = maxits; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS iterations are reported.
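For context, a typical MinLM session with per-iteration reports enabled might look like the sketch below; it assumes the matching alglib:: C++ wrappers (minlmcreatev, minlmsetcond, minlmsetxrep, minlmoptimize, minlmresults) and user-supplied callbacks my_fvec/my_rep, which are not defined in this unit:

    alglib::real_1d_array x = "[0,0]";
    alglib::minlmstate state;
    alglib::minlmreport rep;
    alglib::minlmcreatev(2, 2, x, 0.0001, state); // N=2, M=2, finite-difference step 1e-4
    alglib::minlmsetcond(state, 1.0e-9, 0);       // EpsX=1e-9, unlimited MaxIts
    alglib::minlmsetxrep(state, true);            // rep() will be called on each accepted point
    alglib::minlmoptimize(state, my_fvec, my_rep);
    alglib::minlmresults(state, x, rep);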
-- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state) { state->xrep = needxrep; } /************************************************************************* This function sets maximum step length. INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. NOTE: non-zero StpMax leads to moderate performance degradation because intermediate step of preconditioned L-BFGS optimization is incompatible with limits on step size. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state) { ae_assert(ae_isfinite(stpmax, _state), "MinLMSetStpMax: StpMax is not finite!", _state); ae_assert(ae_fp_greater_eq(stpmax,(double)(0)), "MinLMSetStpMax: StpMax<0!", _state); state->stpmax = stpmax; } /************************************************************************* This function sets scaling coefficients for LM optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Generally, scale is NOT considered to be a form of preconditioner. But LM optimizer is unique in that it uses scaling matrix both in the stopping condition tests and as Marquardt damping factor. Proper scaling is very important for the algorithm performance. It is less important for the quality of results, but still has some influence (it is easier to converge when variables are properly scaled, so premature stopping is possible when very badly scaled variables are combined with relaxed stopping conditions). INPUT PARAMETERS: State - structure which stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetscale(minlmstate* state, /* Real */ ae_vector* s, ae_state *_state) { ae_int_t i; ae_assert(s->cnt>=state->n, "MinLMSetScale: Length(S)<N", _state); for(i=0; i<=state->n-1; i++) { ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinLMSetScale: S contains infinite or NAN elements", _state); ae_assert(ae_fp_neq(s->ptr.p_double[i],(double)(0)), "MinLMSetScale: S contains zero elements", _state); state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); } } /************************************************************************* This function sets boundary constraints for LM optimizer Boundary constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another SetBC() call. INPUT PARAMETERS: State - structure which stores algorithm state BndL - lower bounds, array[N].
If some (all) variables are unbounded, you may specify very small number or -INF (latter is recommended because it will allow solver to use better algorithm). BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify very large number or +INF (latter is recommended because it will allow solver to use better algorithm). NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints or at its boundary -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetbc(minlmstate* state, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; ae_assert(bndl->cnt>=n, "MinLMSetBC: Length(BndL)<N!", _state); ae_assert(bndu->cnt>=n, "MinLMSetBC: Length(BndU)<N!", _state); for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinLMSetBC: BndL contains NAN or +INF", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinLMSetBC: BndU contains NAN or -INF", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); } } /************************************************************************* This function sets general linear constraints for LM optimizer Linear constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another minlmsetlc() call. INPUT PARAMETERS: State - structure which stores algorithm state C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT IMPORTANT: if you have linear constraints, it is strongly recommended to set scale of variables with minlmsetscale(). QP solver which is used to calculate linearly constrained steps heavily relies on good scaling of input problems. IMPORTANT: solvers created with minlmcreatefgh() do not support linear constraints. NOTE: linear (non-bound) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations. NOTE: general linear constraints add significant overhead to solution process. Although solver performs roughly the same number of iterations (when compared with similar box-only constrained problem), each iteration now involves solution of linearly constrained QP subproblem, which requires ~3-5 times more Cholesky decompositions. Thus, if you can reformulate your problem in such a way that it has only box constraints, it may be beneficial to do so.
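As an encoding example (a sketch, assuming the matching alglib:: wrapper minlmsetlc is available), the constraints x0+x1<=1 and x0-x1=0 for a 2-dimensional problem would be passed as:

    alglib::real_2d_array c = "[[1,1,1],[1,-1,0]]";
    alglib::integer_1d_array ct = "[-1,0]";
    alglib::minlmsetlc(state, c, ct);   // row 0 with CT=-1 is a "<=" constraint, row 1 with CT=0 is "="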
-- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ void minlmsetlc(minlmstate* state, /* Real */ ae_matrix* c, /* Integer */ ae_vector* ct, ae_int_t k, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; /* * First, check for errors in the inputs */ ae_assert(k>=0, "MinLMSetLC: K<0", _state); ae_assert(c->cols>=n+1||k==0, "MinLMSetLC: Cols(C)<N+1", _state); ae_assert(c->rows>=k, "MinLMSetLC: Rows(C)<K", _state); ae_assert(ct->cnt>=k, "MinLMSetLC: Length(CT)<K", _state); if( k==0 ) { state->nec = 0; state->nic = 0; return; } /* * Equality constraints are stored first, in the upper * NEC rows of State.CLEIC matrix. Inequality constraints * are stored in the next NIC rows. * * NOTE: we convert inequality constraints to the form * A*x<=b before copying them. */ rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); state->nec = 0; state->nic = 0; for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]==0 ) { ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); state->nec = state->nec+1; } } for(i=0; i<=k-1; i++) { if( ct->ptr.p_int[i]!=0 ) { if( ct->ptr.p_int[i]>0 ) { ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } else { ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); } state->nic = state->nic+1; } } } /************************************************************************* This function is used to change acceleration settings You can choose between three acceleration strategies: * AccType=0, no acceleration. * AccType=1, secant updates are used to update quadratic model after each iteration. After fixed number of iterations (or after model breakdown) we recalculate quadratic model using analytic Jacobian or finite differences. Number of secant-based iterations depends on optimization settings: about 3 iterations - when we have analytic Jacobian, up to 2*N iterations - when we use finite differences to calculate Jacobian. AccType=1 is recommended when Jacobian calculation cost is prohibitively high (several Mx1 function vector calculations followed by several NxN Cholesky factorizations are faster than calculation of one M*N Jacobian). It should also be used when we have no Jacobian, because finite difference approximation takes too much time to compute. The table below lists optimization protocols (XYZ protocol corresponds to MinLMCreateXYZ) and acceleration types they support (and use by default).

ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
protocol    0   1   comment
V           +   +
VJ          +   +
FGH         +

DEFAULT VALUES:
protocol    0   1   comment
V               x   without acceleration it is so slooooooooow
VJ          x
FGH         x

NOTE: this function should be called before optimization. Attempt to call it during algorithm iterations may result in unexpected behavior. NOTE: attempt to call this function with unsupported protocol/acceleration combination will result in exception being thrown.
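A short sketch (assuming the matching alglib:: wrapper is available): to switch acceleration off for a solver created with minlmcreatev(), call

    alglib::minlmsetacctype(state, 0);   // AccType=0: plain Levenberg-Marquardt, no secant updates

before the first minlmoptimize() call; passing 1 restores the secant-update scheme described above.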
-- ALGLIB -- Copyright 14.10.2010 by Bochkanov Sergey *************************************************************************/ void minlmsetacctype(minlmstate* state, ae_int_t acctype, ae_state *_state) { ae_assert((acctype==0||acctype==1)||acctype==2, "MinLMSetAccType: incorrect AccType!", _state); if( acctype==2 ) { acctype = 0; } if( acctype==0 ) { state->maxmodelage = 0; state->makeadditers = ae_false; return; } if( acctype==1 ) { ae_assert(state->hasfi, "MinLMSetAccType: AccType=1 is incompatible with current protocol!", _state); if( state->algomode==0 ) { state->maxmodelage = 2*state->n; } else { state->maxmodelage = minlm_smallmodelage; } state->makeadditers = ae_false; return; } } /************************************************************************* NOTES: 1. Depending on function used to create state structure, this algorithm may accept Jacobian and/or Hessian and/or gradient. According to the said above, there ase several versions of this function, which accept different sets of callbacks. This flexibility opens way to subtle errors - you may create state with MinLMCreateFGH() (optimization using Hessian), but call function which does not accept Hessian. So when algorithm will request Hessian, there will be no callback to call. In this case exception will be thrown. Be careful to avoid such errors because there is no way to find them at compile time - you can see them at runtime only. -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ ae_bool minlmiteration(minlmstate* state, ae_state *_state) { ae_int_t n; ae_int_t m; ae_bool bflag; ae_int_t iflag; double v; double s; double t; double fnew; ae_int_t i; ae_int_t k; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
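 *
 * As an illustration, a driver loop around this reverse-communication
 * core might look like the sketch below (the user_* callbacks are
 * illustrative placeholders, not part of ALGLIB):
 *
 *     while( minlmiteration(state, _state) )
 *     {
 *         if( state->needfi   ) { user_fvec(&state->x, &state->fi);           continue; }
 *         if( state->needfij  ) { user_jac(&state->x, &state->fi, &state->j); continue; }
 *         if( state->needf    ) { user_func(&state->x, &state->f);            continue; }
 *         if( state->xupdated ) { user_report(&state->x, state->f);           continue; }
 *     }
 *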
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { n = state->rstate.ia.ptr.p_int[0]; m = state->rstate.ia.ptr.p_int[1]; iflag = state->rstate.ia.ptr.p_int[2]; i = state->rstate.ia.ptr.p_int[3]; k = state->rstate.ia.ptr.p_int[4]; bflag = state->rstate.ba.ptr.p_bool[0]; v = state->rstate.ra.ptr.p_double[0]; s = state->rstate.ra.ptr.p_double[1]; t = state->rstate.ra.ptr.p_double[2]; fnew = state->rstate.ra.ptr.p_double[3]; } else { n = 359; m = -58; iflag = -919; i = -909; k = 81; bflag = ae_true; v = 74; s = -788; t = 809; fnew = 205; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } if( state->rstate.stage==2 ) { goto lbl_2; } if( state->rstate.stage==3 ) { goto lbl_3; } if( state->rstate.stage==4 ) { goto lbl_4; } if( state->rstate.stage==5 ) { goto lbl_5; } if( state->rstate.stage==6 ) { goto lbl_6; } if( state->rstate.stage==7 ) { goto lbl_7; } if( state->rstate.stage==8 ) { goto lbl_8; } if( state->rstate.stage==9 ) { goto lbl_9; } if( state->rstate.stage==10 ) { goto lbl_10; } if( state->rstate.stage==11 ) { goto lbl_11; } if( state->rstate.stage==12 ) { goto lbl_12; } if( state->rstate.stage==13 ) { goto lbl_13; } if( state->rstate.stage==14 ) { goto lbl_14; } if( state->rstate.stage==15 ) { goto lbl_15; } if( state->rstate.stage==16 ) { goto lbl_16; } if( state->rstate.stage==17 ) { goto lbl_17; } if( state->rstate.stage==18 ) { goto lbl_18; } if( state->rstate.stage==19 ) { goto lbl_19; } if( state->rstate.stage==20 ) { goto lbl_20; } if( state->rstate.stage==21 ) { goto lbl_21; } if( state->rstate.stage==22 ) { goto lbl_22; } if( state->rstate.stage==23 ) { goto lbl_23; } if( state->rstate.stage==24 ) { goto lbl_24; } if( state->rstate.stage==25 ) { goto lbl_25; } if( state->rstate.stage==26 ) { goto lbl_26; } if( state->rstate.stage==27 ) { goto lbl_27; } /* * Routine body */ /* * prepare */ n = state->n; m = state->m; state->repiterationscount = 0; state->repterminationtype = 0; state->repnfunc = 0; state->repnjac = 0; state->repngrad = 0; state->repnhess = 0; state->repncholesky = 0; state->userterminationneeded = ae_false; if( m>0 ) { smoothnessmonitorinit(&state->smonitor, n, m, ae_false, _state); } for(i=0; i<=n-1; i++) { state->lastscaleused.ptr.p_double[i] = state->s.ptr.p_double[i]; } /* * Prepare LM step finder and enforce/check feasibility of constraints */ if( !minlm_minlmstepfinderinit(&state->finderstate, n, m, state->maxmodelage, state->hasfi, &state->xbase, &state->bndl, &state->bndu, &state->cleic, state->nec, state->nic, &state->s, state->stpmax, state->epsx, _state) ) { state->repterminationtype = -3; result = ae_false; return result; } /* * set constraints for obsolete QP solver */ minqpsetbc(&state->qpstate, &state->bndl, &state->bndu, _state); /* * Check correctness of the analytic Jacobian */ minlm_clearrequestfields(state, _state); if( !(state->algomode==1&&ae_fp_greater(state->teststep,(double)(0))) ) { goto lbl_28; } ae_assert(m>0, "MinLM: integrity check failed", _state); lbl_30: if( !smoothnessmonitorcheckgradientatx0(&state->smonitor, &state->xbase, &state->s, &state->bndl, &state->bndu, ae_true, state->teststep, _state) ) { goto lbl_31; } for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = state->smonitor.x.ptr.p_double[i]; } state->needfij = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfij = ae_false; for(i=0; i<=m-1; i++) { 
state->smonitor.fi.ptr.p_double[i] = state->fi.ptr.p_double[i]; for(k=0; k<=n-1; k++) { state->smonitor.j.ptr.pp_double[i][k] = state->j.ptr.pp_double[i][k]; } } goto lbl_30; lbl_31: lbl_28: /* * Initial report of current point * * Note 1: we rewrite State.X twice because * user may accidentally change it after first call. * * Note 2: we set NeedF or NeedFI depending on what * information about function we have. */ if( !state->xrep ) { goto lbl_32; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); if( !state->hasf ) { goto lbl_34; } state->needf = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needf = ae_false; goto lbl_35; lbl_34: ae_assert(state->hasfi, "MinLM: internal error 2!", _state); state->needfi = ae_true; state->rstate.stage = 2; goto lbl_rcomm; lbl_2: state->needfi = ae_false; v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->f = v; lbl_35: state->repnfunc = state->repnfunc+1; ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 3; goto lbl_rcomm; lbl_3: state->xupdated = ae_false; lbl_32: if( state->userterminationneeded ) { /* * User requested termination */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } /* * Prepare control variables */ state->nu = (double)(1); state->lambdav = -ae_maxrealnumber; state->modelage = state->maxmodelage+1; state->deltaxready = ae_false; state->deltafready = ae_false; if( state->algomode==2 ) { goto lbl_36; } /* * Jacobian-based optimization mode * * Main cycle. * * We move through it until either: * * one of the stopping conditions is met * * we decide that stopping conditions are too stringent * and break from cycle */ lbl_38: if( ae_false ) { goto lbl_39; } /* * First, we have to prepare quadratic model for our function. * We use BFlag to ensure that model is prepared; * if it is false at the end of this block, something went wrong. * * We may either calculate brand new model or update old one. * * Before this block we have: * * State.XBase - current position. * * State.DeltaX - if DeltaXReady is True * * State.DeltaF - if DeltaFReady is True * * After this block is over, we will have: * * State.XBase - base point (unchanged) * * State.FBase - F(XBase) * * State.GBase - linear term * * State.QuadraticModel - quadratic term * * State.LambdaV - current estimate for lambda * * We also clear DeltaXReady/DeltaFReady flags * after initialization is done. */ ae_assert(state->algomode==0||state->algomode==1, "MinLM: integrity check failed", _state); if( !(state->modelage>state->maxmodelage||!(state->deltaxready&&state->deltafready)) ) { goto lbl_40; } /* * Refresh model (using either finite differences or analytic Jacobian) */ if( state->algomode!=0 ) { goto lbl_42; } /* * Optimization using F values only. * Use finite differences to estimate Jacobian. */ ae_assert(state->hasfi, "MinLMIteration: internal error when estimating Jacobian (no f[])", _state); k = 0; lbl_44: if( k>n-1 ) { goto lbl_46; } /* * We guard X[k] from leaving [BndL,BndU]. * In case BndL=BndU, we assume that derivative in this direction is zero. 
*/ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; if( state->havebndl.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); } if( state->havebndu.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); } state->xm1 = state->x.ptr.p_double[k]; minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 4; goto lbl_rcomm; lbl_4: state->repnfunc = state->repnfunc+1; ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; if( state->havebndl.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); } if( state->havebndu.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); } state->xp1 = state->x.ptr.p_double[k]; minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 5; goto lbl_rcomm; lbl_5: state->repnfunc = state->repnfunc+1; ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); v = state->xp1-state->xm1; if( ae_fp_neq(v,(double)(0)) ) { v = 1/v; ae_v_moved(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fp1.ptr.p_double[0], 1, ae_v_len(0,m-1), v); ae_v_subd(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fm1.ptr.p_double[0], 1, ae_v_len(0,m-1), v); } else { for(i=0; i<=m-1; i++) { state->j.ptr.pp_double[i][k] = (double)(0); } } k = k+1; goto lbl_44; lbl_46: /* * Calculate F(XBase) */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 6; goto lbl_rcomm; lbl_6: state->needfi = ae_false; state->repnfunc = state->repnfunc+1; state->repnjac = state->repnjac+1; /* * New model */ state->modelage = 0; goto lbl_43; lbl_42: /* * Obtain f[] and Jacobian */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->needfij = ae_true; state->rstate.stage = 7; goto lbl_rcomm; lbl_7: state->needfij = ae_false; state->repnfunc = state->repnfunc+1; state->repnjac = state->repnjac+1; /* * New model */ state->modelage = 0; lbl_43: goto lbl_41; lbl_40: /* * State.J contains Jacobian or its current approximation; * refresh it using secant updates: * * f(x0+dx) = f(x0) + J*dx, * J_new = J_old + u*h' * h = x_new-x_old * u = (f_new - f_old - J_old*h)/(h'h) * * We can explicitly generate h and u, but it is * preferential to do in-place calculations. Only * I-th row of J_old is needed to calculate u[I], * so we can update J row by row in one pass. * * NOTE: we expect that State.XBase contains new point, * State.FBase contains old point, State.DeltaX and * State.DeltaY contain updates from last step. 
*/ ae_assert(state->deltaxready&&state->deltafready, "MinLMIteration: uninitialized DeltaX/DeltaF", _state); t = ae_v_dotproduct(&state->deltax.ptr.p_double[0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_assert(ae_fp_neq(t,(double)(0)), "MinLM: internal error (T=0)", _state); for(i=0; i<=m-1; i++) { v = ae_v_dotproduct(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = (state->deltaf.ptr.p_double[i]-v)/t; ae_v_addd(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1), v); } ae_v_move(&state->fi.ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_add(&state->fi.ptr.p_double[0], 1, &state->deltaf.ptr.p_double[0], 1, ae_v_len(0,m-1)); /* * Increase model age */ state->modelage = state->modelage+1; lbl_41: rmatrixgemm(n, n, m, 2.0, &state->j, 0, 0, 1, &state->j, 0, 0, 0, 0.0, &state->quadraticmodel, 0, 0, _state); rmatrixmv(n, m, &state->j, 0, 0, 1, &state->fi, 0, &state->gbase, 0, _state); ae_v_muld(&state->gbase.ptr.p_double[0], 1, ae_v_len(0,n-1), 2); v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->fbase = v; ae_v_move(&state->fibase.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->deltaxready = ae_false; state->deltafready = ae_false; /* * Perform integrity check (presense of NAN/INF) */ v = state->fbase; for(i=0; i<=n-1; i++) { v = 0.1*v+state->gbase.ptr.p_double[i]; } if( !ae_isfinite(v, _state) ) { /* * Break! */ state->repterminationtype = -8; result = ae_false; return result; } /* * If Lambda is not initialized, initialize it using quadratic model */ if( ae_fp_less(state->lambdav,(double)(0)) ) { state->lambdav = (double)(0); for(i=0; i<=n-1; i++) { state->lambdav = ae_maxreal(state->lambdav, ae_fabs(state->quadraticmodel.ptr.pp_double[i][i], _state)*ae_sqr(state->s.ptr.p_double[i], _state), _state); } state->lambdav = 0.001*state->lambdav; if( ae_fp_eq(state->lambdav,(double)(0)) ) { state->lambdav = (double)(1); } } /* * Find value of Levenberg-Marquardt damping parameter which: * * leads to positive definite damped model * * within bounds specified by StpMax * * generates step which decreases function value * * After this block IFlag is set to: * * -8, if internal integrity control detected NAN/INF in function values * * -3, if constraints are infeasible * * -2, if model update is needed (either Lambda growth is too large * or step is too short, but we can't rely on model and stop iterations) * * -1, if model is fresh, Lambda have grown too large, termination is needed * * 0, if everything is OK, continue iterations * * >0, successful termination, step is less than EpsX * * State.Nu can have any value on enter, but after exit it is set to 1.0 */ iflag = -99; minlm_minlmstepfinderstart(&state->finderstate, &state->quadraticmodel, &state->gbase, state->fbase, &state->xbase, &state->fibase, state->modelage, _state); lbl_47: if( !minlm_minlmstepfinderiteration(&state->finderstate, &state->lambdav, &state->nu, &state->xnew, &state->deltax, &state->deltaxready, &state->deltaf, &state->deltafready, &iflag, &fnew, &state->repncholesky, _state) ) { goto lbl_48; } ae_assert(state->hasfi||state->hasf, "MinLM: internal error 2!", _state); state->repnfunc = state->repnfunc+1; minlm_clearrequestfields(state, _state); if( !state->finderstate.needfi ) { goto lbl_49; } ae_assert(state->hasfi, "MinLM: internal error 2!", _state); ae_v_move(&state->x.ptr.p_double[0], 1, 
&state->finderstate.x.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->needfi = ae_true; state->rstate.stage = 8; goto lbl_rcomm; lbl_8: state->needfi = ae_false; ae_v_move(&state->finderstate.fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); goto lbl_47; lbl_49: if( !state->finderstate.needf ) { goto lbl_51; } ae_assert(state->hasf, "MinLM: internal error 2!", _state); ae_v_move(&state->x.ptr.p_double[0], 1, &state->finderstate.x.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->needf = ae_true; state->rstate.stage = 9; goto lbl_rcomm; lbl_9: state->needf = ae_false; state->finderstate.f = state->f; goto lbl_47; lbl_51: ae_assert(ae_false, "MinLM: internal error 2!", _state); goto lbl_47; lbl_48: if( state->userterminationneeded ) { /* * User requested termination */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } state->nu = (double)(1); ae_assert(((iflag>=-3&&iflag<=0)||iflag==-8)||iflag>0, "MinLM: internal integrity check failed!", _state); if( iflag==-3 ) { state->repterminationtype = -3; result = ae_false; return result; } if( iflag==-2 ) { state->modelage = state->maxmodelage+1; goto lbl_38; } if( iflag!=-1 ) { goto lbl_53; } /* * Stopping conditions are too stringent */ state->repterminationtype = 7; if( !state->xrep ) { goto lbl_55; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 10; goto lbl_rcomm; lbl_10: state->xupdated = ae_false; lbl_55: result = ae_false; return result; lbl_53: if( !(iflag==-8||iflag>0) ) { goto lbl_57; } /* * Either: * * Integrity check failed - infinities or NANs * * successful termination (step size is small enough) */ state->repterminationtype = iflag; if( !state->xrep ) { goto lbl_59; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 11; goto lbl_rcomm; lbl_11: state->xupdated = ae_false; lbl_59: result = ae_false; return result; lbl_57: state->f = fnew; /* * Levenberg-Marquardt step is ready. * Compare predicted vs. actual decrease and decide what to do with lambda. * * NOTE: we expect that State.DeltaX contains direction of step, * State.F contains function value at new point. */ ae_assert(state->deltaxready, "MinLM: deltaX is not ready", _state); iflag = minlm_checkdecrease(&state->quadraticmodel, &state->gbase, state->fbase, n, &state->deltax, state->f, &state->lambdav, &state->nu, _state); if( iflag==0 ) { goto lbl_61; } state->repterminationtype = iflag; if( !state->xrep ) { goto lbl_63; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 12; goto lbl_rcomm; lbl_12: state->xupdated = ae_false; lbl_63: result = ae_false; return result; lbl_61: /* * Accept step, report it and * test stopping conditions on iterations count and function decrease. * * NOTE: we expect that State.DeltaX contains direction of step, * State.F contains function value at new point. * * NOTE2: we should update XBase ONLY. In the beginning of the next * iteration we expect that State.FIBase is NOT updated and * contains old value of a function vector. 
*/ ae_v_move(&state->xbase.ptr.p_double[0], 1, &state->xnew.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !state->xrep ) { goto lbl_65; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 13; goto lbl_rcomm; lbl_13: state->xupdated = ae_false; lbl_65: state->repiterationscount = state->repiterationscount+1; if( state->repiterationscount>=state->maxits&&state->maxits>0 ) { state->repterminationtype = 5; } if( state->repterminationtype<=0 ) { goto lbl_67; } if( !state->xrep ) { goto lbl_69; } /* * Report: XBase contains new point, F contains function value at new point */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 14; goto lbl_rcomm; lbl_14: state->xupdated = ae_false; lbl_69: result = ae_false; return result; lbl_67: state->modelage = state->modelage+1; goto lbl_38; lbl_39: /* * Lambda is too large, we have to break iterations. */ state->repterminationtype = 7; if( !state->xrep ) { goto lbl_71; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 15; goto lbl_rcomm; lbl_15: state->xupdated = ae_false; lbl_71: goto lbl_37; lbl_36: /* * Legacy Hessian-based mode * * Main cycle. * * We move through it until either: * * one of the stopping conditions is met * * we decide that stopping conditions are too stringent * and break from cycle * */ if( state->nec+state->nic>0 ) { /* * FGH solver does not support general linear constraints */ state->repterminationtype = -5; result = ae_false; return result; } lbl_73: if( ae_false ) { goto lbl_74; } /* * First, we have to prepare quadratic model for our function. * We use BFlag to ensure that model is prepared; * if it is false at the end of this block, something went wrong. * * We may either calculate brand new model or update old one. * * Before this block we have: * * State.XBase - current position. * * State.DeltaX - if DeltaXReady is True * * State.DeltaF - if DeltaFReady is True * * After this block is over, we will have: * * State.XBase - base point (unchanged) * * State.FBase - F(XBase) * * State.GBase - linear term * * State.QuadraticModel - quadratic term * * State.LambdaV - current estimate for lambda * * We also clear DeltaXReady/DeltaFReady flags * after initialization is done. */ bflag = ae_false; if( !(state->algomode==0||state->algomode==1) ) { goto lbl_75; } /* * Calculate f[] and Jacobian */ if( !(state->modelage>state->maxmodelage||!(state->deltaxready&&state->deltafready)) ) { goto lbl_77; } /* * Refresh model (using either finite differences or analytic Jacobian) */ if( state->algomode!=0 ) { goto lbl_79; } /* * Optimization using F values only. * Use finite differences to estimate Jacobian. */ ae_assert(state->hasfi, "MinLMIteration: internal error when estimating Jacobian (no f[])", _state); k = 0; lbl_81: if( k>n-1 ) { goto lbl_83; } /* * We guard X[k] from leaving [BndL,BndU]. * In case BndL=BndU, we assume that derivative in this direction is zero. 
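 *
 * In other words, column K of the Jacobian is estimated by a clipped
 * central difference (a reading aid for the block below, written in terms
 * of the XM1/XP1/Fm1/Fp1 values stored in the state):
 *
 *     XM1 = clip(XBase[k] - DiffStep*S[k]),  XP1 = clip(XBase[k] + DiffStep*S[k])
 *     J[i][k] ~ (Fp1[i] - Fm1[i]) / (XP1 - XM1),   if XP1<>XM1
 *     J[i][k] = 0,                                 if XP1=XM1 (e.g. BndL=BndU)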
*/ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; if( state->havebndl.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); } if( state->havebndu.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); } state->xm1 = state->x.ptr.p_double[k]; minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 16; goto lbl_rcomm; lbl_16: state->repnfunc = state->repnfunc+1; ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; if( state->havebndl.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); } if( state->havebndu.ptr.p_bool[k] ) { state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); } state->xp1 = state->x.ptr.p_double[k]; minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 17; goto lbl_rcomm; lbl_17: state->repnfunc = state->repnfunc+1; ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); v = state->xp1-state->xm1; if( ae_fp_neq(v,(double)(0)) ) { v = 1/v; ae_v_moved(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fp1.ptr.p_double[0], 1, ae_v_len(0,m-1), v); ae_v_subd(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fm1.ptr.p_double[0], 1, ae_v_len(0,m-1), v); } else { for(i=0; i<=m-1; i++) { state->j.ptr.pp_double[i][k] = (double)(0); } } k = k+1; goto lbl_81; lbl_83: /* * Calculate F(XBase) */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->needfi = ae_true; state->rstate.stage = 18; goto lbl_rcomm; lbl_18: state->needfi = ae_false; state->repnfunc = state->repnfunc+1; state->repnjac = state->repnjac+1; /* * New model */ state->modelage = 0; goto lbl_80; lbl_79: /* * Obtain f[] and Jacobian */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->needfij = ae_true; state->rstate.stage = 19; goto lbl_rcomm; lbl_19: state->needfij = ae_false; state->repnfunc = state->repnfunc+1; state->repnjac = state->repnjac+1; /* * New model */ state->modelage = 0; lbl_80: goto lbl_78; lbl_77: /* * State.J contains Jacobian or its current approximation; * refresh it using secant updates: * * f(x0+dx) = f(x0) + J*dx, * J_new = J_old + u*h' * h = x_new-x_old * u = (f_new - f_old - J_old*h)/(h'h) * * We can explicitly generate h and u, but it is * preferential to do in-place calculations. Only * I-th row of J_old is needed to calculate u[I], * so we can update J row by row in one pass. * * NOTE: we expect that State.XBase contains new point, * State.FBase contains old point, State.DeltaX and * State.DeltaY contain updates from last step. 
*/ ae_assert(state->deltaxready&&state->deltafready, "MinLMIteration: uninitialized DeltaX/DeltaF", _state); t = ae_v_dotproduct(&state->deltax.ptr.p_double[0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_assert(ae_fp_neq(t,(double)(0)), "MinLM: internal error (T=0)", _state); for(i=0; i<=m-1; i++) { v = ae_v_dotproduct(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = (state->deltaf.ptr.p_double[i]-v)/t; ae_v_addd(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1), v); } ae_v_move(&state->fi.ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_add(&state->fi.ptr.p_double[0], 1, &state->deltaf.ptr.p_double[0], 1, ae_v_len(0,m-1)); /* * Increase model age */ state->modelage = state->modelage+1; lbl_78: /* * Generate quadratic model: * f(xbase+dx) = * = (f0 + J*dx)'(f0 + J*dx) * = f0^2 + dx'J'f0 + f0*J*dx + dx'J'J*dx * = f0^2 + 2*f0*J*dx + dx'J'J*dx * * Note that we calculate 2*(J'J) instead of J'J because * our quadratic model is based on Tailor decomposition, * i.e. it has 0.5 before quadratic term. */ rmatrixgemm(n, n, m, 2.0, &state->j, 0, 0, 1, &state->j, 0, 0, 0, 0.0, &state->quadraticmodel, 0, 0, _state); rmatrixmv(n, m, &state->j, 0, 0, 1, &state->fi, 0, &state->gbase, 0, _state); ae_v_muld(&state->gbase.ptr.p_double[0], 1, ae_v_len(0,n-1), 2); v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->fbase = v; ae_v_move(&state->fibase.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); /* * set control variables */ bflag = ae_true; lbl_75: if( state->algomode!=2 ) { goto lbl_84; } ae_assert(!state->hasfi, "MinLMIteration: internal error (HasFI is True in Hessian-based mode)", _state); /* * Obtain F, G, H */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->needfgh = ae_true; state->rstate.stage = 20; goto lbl_rcomm; lbl_20: state->needfgh = ae_false; state->repnfunc = state->repnfunc+1; state->repngrad = state->repngrad+1; state->repnhess = state->repnhess+1; rmatrixcopy(n, n, &state->h, 0, 0, &state->quadraticmodel, 0, 0, _state); ae_v_move(&state->gbase.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->fbase = state->f; /* * set control variables */ bflag = ae_true; state->modelage = 0; lbl_84: ae_assert(bflag, "MinLM: internal integrity check failed!", _state); state->deltaxready = ae_false; state->deltafready = ae_false; /* * Perform integrity check (presense of NAN/INF) */ v = state->fbase; for(i=0; i<=n-1; i++) { v = 0.1*v+state->gbase.ptr.p_double[i]; } if( !ae_isfinite(v, _state) ) { /* * Break! 
*/ state->repterminationtype = -8; result = ae_false; return result; } /* * If Lambda is not initialized, initialize it using quadratic model */ if( ae_fp_less(state->lambdav,(double)(0)) ) { state->lambdav = (double)(0); for(i=0; i<=n-1; i++) { state->lambdav = ae_maxreal(state->lambdav, ae_fabs(state->quadraticmodel.ptr.pp_double[i][i], _state)*ae_sqr(state->s.ptr.p_double[i], _state), _state); } state->lambdav = 0.001*state->lambdav; if( ae_fp_eq(state->lambdav,(double)(0)) ) { state->lambdav = (double)(1); } } /* * Find value of Levenberg-Marquardt damping parameter which: * * leads to positive definite damped model * * within bounds specified by StpMax * * generates step which decreases function value * * After this block IFlag is set to: * * -3, if constraints are infeasible * * -2, if model update is needed (either Lambda growth is too large * or step is too short, but we can't rely on model and stop iterations) * * -1, if model is fresh, Lambda have grown too large, termination is needed * * 0, if everything is OK, continue iterations * * State.Nu can have any value on enter, but after exit it is set to 1.0 */ iflag = -99; lbl_86: if( ae_false ) { goto lbl_87; } /* * Do we need model update? */ if( state->modelage>0&&ae_fp_greater_eq(state->nu,minlm_suspiciousnu) ) { iflag = -2; goto lbl_87; } /* * Setup quadratic solver and solve quadratic programming problem. * After problem is solved we'll try to bound step by StpMax * (Lambda will be increased if step size is too large). * * We use BFlag variable to indicate that we have to increase Lambda. * If it is False, we will try to increase Lambda and move to new iteration. */ bflag = ae_true; minqpsetstartingpointfast(&state->qpstate, &state->xbase, _state); minqpsetoriginfast(&state->qpstate, &state->xbase, _state); minqpsetlineartermfast(&state->qpstate, &state->gbase, _state); minqpsetquadratictermfast(&state->qpstate, &state->quadraticmodel, ae_true, 0.0, _state); for(i=0; i<=n-1; i++) { state->tmp0.ptr.p_double[i] = state->quadraticmodel.ptr.pp_double[i][i]+state->lambdav/ae_sqr(state->s.ptr.p_double[i], _state); } minqprewritediagonal(&state->qpstate, &state->tmp0, _state); minqpoptimize(&state->qpstate, _state); minqpresultsbuf(&state->qpstate, &state->xdir, &state->qprep, _state); if( state->qprep.terminationtype>0 ) { /* * successful solution of QP problem */ ae_v_sub(&state->xdir.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = ae_v_dotproduct(&state->xdir.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( ae_isfinite(v, _state) ) { v = ae_sqrt(v, _state); if( ae_fp_greater(state->stpmax,(double)(0))&&ae_fp_greater(v,state->stpmax) ) { bflag = ae_false; } } else { bflag = ae_false; } } else { /* * Either problem is non-convex (increase LambdaV) or constraints are inconsistent */ ae_assert((state->qprep.terminationtype==-3||state->qprep.terminationtype==-4)||state->qprep.terminationtype==-5, "MinLM: unexpected completion code from QP solver", _state); if( state->qprep.terminationtype==-3 ) { iflag = -3; goto lbl_87; } bflag = ae_false; } if( !bflag ) { /* * Solution failed: * try to increase lambda to make matrix positive definite and continue. */ if( !minlm_increaselambda(&state->lambdav, &state->nu, _state) ) { iflag = -1; goto lbl_87; } goto lbl_86; } /* * Step in State.XDir and it is bounded by StpMax. * * We should check stopping conditions on step size here. * DeltaX, which is used for secant updates, is initialized here. 
* * This code is a bit tricky because sometimes XDir<>0, but * it is so small that XDir+XBase==XBase (in finite precision * arithmetics). So we set DeltaX to XBase, then * add XDir, and then subtract XBase to get exact value of * DeltaX. * * Step length is estimated using DeltaX. * * NOTE: stopping conditions are tested * for fresh models only (ModelAge=0) */ ae_v_move(&state->deltax.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->deltax.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_sub(&state->deltax.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->deltaxready = ae_true; v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->deltax.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( ae_fp_greater(v,state->epsx) ) { goto lbl_88; } if( state->modelage!=0 ) { goto lbl_90; } /* * Step is too short, model is fresh and we can rely on it. * Terminating. */ state->repterminationtype = 2; if( !state->xrep ) { goto lbl_92; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 21; goto lbl_rcomm; lbl_21: state->xupdated = ae_false; lbl_92: result = ae_false; return result; goto lbl_91; lbl_90: /* * Step is suspiciously short, but model is not fresh * and we can't rely on it. */ iflag = -2; goto lbl_87; lbl_91: lbl_88: /* * Let's evaluate new step: * a) if we have Fi vector, we evaluate it using rcomm, and * then we manually calculate State.F as sum of squares of Fi[] * b) if we have F value, we just evaluate it through rcomm interface * * We prefer (a) because we may need Fi vector for additional * iterations */ ae_assert(state->hasfi||state->hasf, "MinLM: internal error 2!", _state); ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_add(&state->x.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); if( !state->hasfi ) { goto lbl_94; } state->needfi = ae_true; state->rstate.stage = 22; goto lbl_rcomm; lbl_22: state->needfi = ae_false; v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->f = v; ae_v_move(&state->deltaf.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_sub(&state->deltaf.ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); state->deltafready = ae_true; goto lbl_95; lbl_94: state->needf = ae_true; state->rstate.stage = 23; goto lbl_rcomm; lbl_23: state->needf = ae_false; lbl_95: state->repnfunc = state->repnfunc+1; if( !ae_isfinite(state->f, _state) ) { /* * Integrity check failed, break! */ state->repterminationtype = -8; result = ae_false; return result; } if( ae_fp_greater_eq(state->f,state->fbase) ) { /* * Increase lambda and continue */ if( !minlm_increaselambda(&state->lambdav, &state->nu, _state) ) { iflag = -1; goto lbl_87; } goto lbl_86; } /* * We've found our step! 
*/ iflag = 0; goto lbl_87; goto lbl_86; lbl_87: if( state->userterminationneeded ) { /* * User requested termination */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->repterminationtype = 8; result = ae_false; return result; } state->nu = (double)(1); ae_assert(iflag>=-3&&iflag<=0, "MinLM: internal integrity check failed!", _state); if( iflag==-3 ) { state->repterminationtype = -3; result = ae_false; return result; } if( iflag==-2 ) { state->modelage = state->maxmodelage+1; goto lbl_73; } if( iflag==-1 ) { goto lbl_74; } /* * Levenberg-Marquardt step is ready. * Compare predicted vs. actual decrease and decide what to do with lambda. * * NOTE: we expect that State.DeltaX contains direction of step, * State.F contains function value at new point. */ ae_assert(state->deltaxready, "MinLM: deltaX is not ready", _state); t = (double)(0); for(i=0; i<=n-1; i++) { v = ae_v_dotproduct(&state->quadraticmodel.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); t = t+state->deltax.ptr.p_double[i]*state->gbase.ptr.p_double[i]+0.5*state->deltax.ptr.p_double[i]*v; } state->predicteddecrease = -t; state->actualdecrease = -(state->f-state->fbase); if( ae_fp_less_eq(state->predicteddecrease,(double)(0)) ) { goto lbl_74; } v = state->actualdecrease/state->predicteddecrease; if( ae_fp_greater_eq(v,0.1) ) { goto lbl_96; } if( minlm_increaselambda(&state->lambdav, &state->nu, _state) ) { goto lbl_98; } /* * Lambda is too large, we have to break iterations. */ state->repterminationtype = 7; if( !state->xrep ) { goto lbl_100; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 24; goto lbl_rcomm; lbl_24: state->xupdated = ae_false; lbl_100: result = ae_false; return result; lbl_98: lbl_96: if( ae_fp_greater(v,0.5) ) { minlm_decreaselambda(&state->lambdav, &state->nu, _state); } /* * Accept step, report it and * test stopping conditions on iterations count and function decrease. * * NOTE: we expect that State.DeltaX contains direction of step, * State.F contains function value at new point. * * NOTE2: we should update XBase ONLY. In the beginning of the next * iteration we expect that State.FIBase is NOT updated and * contains old value of a function vector. */ ae_v_add(&state->xbase.ptr.p_double[0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); if( !state->xrep ) { goto lbl_102; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 25; goto lbl_rcomm; lbl_25: state->xupdated = ae_false; lbl_102: state->repiterationscount = state->repiterationscount+1; if( state->repiterationscount>=state->maxits&&state->maxits>0 ) { state->repterminationtype = 5; } if( state->repterminationtype<=0 ) { goto lbl_104; } if( !state->xrep ) { goto lbl_106; } /* * Report: XBase contains new point, F contains function value at new point */ ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 26; goto lbl_rcomm; lbl_26: state->xupdated = ae_false; lbl_106: result = ae_false; return result; lbl_104: state->modelage = state->modelage+1; goto lbl_73; lbl_74: /* * Lambda is too large, we have to break iterations. 
*/ state->repterminationtype = 7; if( !state->xrep ) { goto lbl_108; } ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); state->f = state->fbase; minlm_clearrequestfields(state, _state); state->xupdated = ae_true; state->rstate.stage = 27; goto lbl_rcomm; lbl_27: state->xupdated = ae_false; lbl_108: lbl_37: result = ae_false; return result; /* * Saving state */ lbl_rcomm: result = ae_true; state->rstate.ia.ptr.p_int[0] = n; state->rstate.ia.ptr.p_int[1] = m; state->rstate.ia.ptr.p_int[2] = iflag; state->rstate.ia.ptr.p_int[3] = i; state->rstate.ia.ptr.p_int[4] = k; state->rstate.ba.ptr.p_bool[0] = bflag; state->rstate.ra.ptr.p_double[0] = v; state->rstate.ra.ptr.p_double[1] = s; state->rstate.ra.ptr.p_double[2] = t; state->rstate.ra.ptr.p_double[3] = fnew; return result; } /************************************************************************* This function activates/deactivates verification of the user-supplied analytic Jacobian. Upon activation of this option OptGuard integrity checker performs numerical differentiation of your target function vector at the initial point (note: future versions may also perform check at the final point) and compares numerical Jacobian with analytic one provided by you. If difference is too large, an error flag is set and optimization session continues. After optimization session is over, you can retrieve the report which stores both Jacobians, and specific components highlighted as suspicious by the OptGuard. The OptGuard report can be retrieved with minlmoptguardresults(). IMPORTANT: gradient check is a high-overhead option which will cost you about 3*N additional function evaluations. In many cases it may cost as much as the rest of the optimization session. YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. NOTE: unlike previous incarnation of the gradient checking code, OptGuard does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step used for numerical differentiation: * TestStep=0 turns verification off * TestStep>0 activates verification You should carefully choose TestStep. Value which is too large (so large that function behavior is non- cubic at this scale) will lead to false alarms. Too short step will result in rounding errors dominating numerical derivative. You may use different step for different parameters by means of setting scale with minlmsetscale(). 
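
A minimal usage sketch (illustrative only: it assumes an optimizer state
that was already created and configured elsewhere, "ogrep" is just an
example variable name of type optguardreport, and 1.0E-3 is an example
test step, not a recommendation):

    minlmoptguardgradient(&state, 1.0E-3, _state);
    ... run the optimization session as usual ...
    minlmoptguardresults(&state, &ogrep, _state);
    if( ogrep.badgradsuspected )
    {
        ... inspect ogrep.badgradfidx, ogrep.badgradvidx,
            ogrep.badgraduser and ogrep.badgradnum ...
    }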
=== EXPLANATION ========================================================== In order to verify gradient algorithm performs following steps: * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], where X[i] is i-th component of the initial point and S[i] is a scale of i-th parameter * F(X) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point -- ALGLIB -- Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ void minlmoptguardgradient(minlmstate* state, double teststep, ae_state *_state) { ae_assert(ae_isfinite(teststep, _state), "MinLMOptGuardGradient: TestStep contains NaN or INF", _state); ae_assert(ae_fp_greater_eq(teststep,(double)(0)), "MinLMOptGuardGradient: invalid argument TestStep(TestStep<0)", _state); state->teststep = teststep; } /************************************************************************* Results of OptGuard integrity check, should be called after optimization session is over. OptGuard checks analytic Jacobian against reference value obtained by numerical differentiation with user-specified step. NOTE: other optimizers perform additional OptGuard checks for things like C0/C1-continuity violations. However, LM optimizer can check only for incorrect Jacobian. The reason is that unlike line search methods LM optimizer does not perform extensive evaluations along the line. Thus, we simply do not have enough data to catch C0/C1-violations. This check is activated with minlmoptguardgradient() function. Following flags are set when these errors are suspected: * rep.badgradsuspected, and additionally: * rep.badgradfidx for specific function (Jacobian row) suspected * rep.badgradvidx for specific variable (Jacobian column) suspected * rep.badgradxbase, a point where gradient/Jacobian is tested * rep.badgraduser, user-provided gradient/Jacobian * rep.badgradnum, reference gradient/Jacobian obtained via numerical differentiation INPUT PARAMETERS: state - algorithm state OUTPUT PARAMETERS: rep - OptGuard report -- ALGLIB -- Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ void minlmoptguardresults(minlmstate* state, optguardreport* rep, ae_state *_state) { _optguardreport_clear(rep); smoothnessmonitorexportreport(&state->smonitor, rep, _state); } /************************************************************************* Levenberg-Marquardt algorithm results NOTE: if you activated OptGuard integrity checking functionality and want to get OptGuard report, it can be retrieved with the help of minlmoptguardresults() function. INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution Rep - optimization report; includes termination codes and additional information. Termination codes are listed below, see comments for this structure for more info. Termination code is stored in rep.terminationtype field: * -8 optimizer detected NAN/INF values either in the function itself, or in its Jacobian * -3 constraints are inconsistent * 2 relative step is no more than EpsX. * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible * 8 terminated by user who called minlmrequesttermination(). X contains point which was "current accepted" when termination request was submitted. 
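
Typical retrieval pattern (a sketch; "x" and "rep" are caller-side
variables of types ae_vector and minlmreport):

    minlmresults(&state, &x, &rep, _state);
    if( rep.terminationtype>0 )
    {
        ... success: x holds the solution ...
    }
    else
    {
        ... failure: e.g. -3 (inconsistent constraints) or -8 (NAN/INF detected) ...
    }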
-- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmresults(minlmstate* state, /* Real */ ae_vector* x, minlmreport* rep, ae_state *_state) { ae_vector_clear(x); _minlmreport_clear(rep); minlmresultsbuf(state, x, rep, _state); } /************************************************************************* Levenberg-Marquardt algorithm results Buffered implementation of MinLMResults(), which uses a pre-allocated buffer to store X[]. If the buffer size is too small, it resizes the buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmresultsbuf(minlmstate* state, /* Real */ ae_vector* x, minlmreport* rep, ae_state *_state) { if( x->cnt<state->n ) { ae_vector_set_length(x, state->n, _state); } ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); rep->iterationscount = state->repiterationscount; rep->terminationtype = state->repterminationtype; rep->nfunc = state->repnfunc; rep->njac = state->repnjac; rep->ngrad = state->repngrad; rep->nhess = state->repnhess; rep->ncholesky = state->repncholesky; } /************************************************************************* This subroutine restarts the LM algorithm from a new point. All optimization parameters are left unchanged. This function allows solving multiple optimization problems (which must have the same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: State - structure used for reverse communication previously allocated with MinLMCreateXXX call. X - new starting point. -- ALGLIB -- Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ void minlmrestartfrom(minlmstate* state, /* Real */ ae_vector* x, ae_state *_state) { ae_assert(x->cnt>=state->n, "MinLMRestartFrom: Length(X)<N!", _state); ae_assert(isfinitevector(x, state->n, _state), "MinLMRestartFrom: X contains infinite or NaN values!", _state); ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); ae_vector_set_length(&state->rstate.ia, 4+1, _state); ae_vector_set_length(&state->rstate.ba, 0+1, _state); ae_vector_set_length(&state->rstate.ra, 3+1, _state); state->rstate.stage = -1; minlm_clearrequestfields(state, _state); } /************************************************************************* This subroutine submits a request for termination of the running optimizer. It should be called from the user-supplied callback when the user decides that it is time to "smoothly" terminate the optimization process. As a result, the optimizer stops at the point which was "current accepted" when the termination request was submitted and returns error code 8 (successful termination). INPUT PARAMETERS: State - optimizer structure NOTE: after the request for termination the optimizer may perform several additional calls to user-supplied callbacks. It does NOT guarantee to stop immediately - it just guarantees that these additional calls will be discarded later. NOTE: calling this function on an optimizer which is NOT running will have no effect. NOTE: multiple calls to this function are possible. First call is counted, subsequent calls are silently ignored.
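
Intended usage pattern (a sketch; with the reverse-communication core in
this file the "callback" is simply the code that serves requests issued by
minlmiteration(), and "stop_requested_externally" is a hypothetical
user-side condition):

    while( minlmiteration(&state, _state) )
    {
        ... evaluate F/Fi/Jacobian as requested via state.needf/needfi/needfij ...
        if( stop_requested_externally )
        {
            minlmrequesttermination(&state, _state);
        }
    }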
-- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ void minlmrequesttermination(minlmstate* state, ae_state *_state) { state->userterminationneeded = ae_true; } /************************************************************************* This is an obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatevgj(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); minlmcreatevj(n, m, x, state, _state); } /************************************************************************* This is an obsolete function. Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefgj(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); minlmcreatefj(n, m, x, state, _state); } /************************************************************************* This function is considered obsolete since ALGLIB 3.1.0 and is present for backward compatibility only. We recommend using MinLMCreateVJ, which provides a similar but more consistent and feature-rich interface. -- ALGLIB -- Copyright 30.03.2009 by Bochkanov Sergey *************************************************************************/ void minlmcreatefj(ae_int_t n, ae_int_t m, /* Real */ ae_vector* x, minlmstate* state, ae_state *_state) { _minlmstate_clear(state); ae_assert(n>=1, "MinLMCreateFJ: N<1!", _state); ae_assert(m>=1, "MinLMCreateFJ: M<1!", _state); ae_assert(x->cnt>=n, "MinLMCreateFJ: Length(X)<N!", _state); ae_assert(isfinitevector(x, n, _state), "MinLMCreateFJ: X contains infinite or NaN values!", _state); state->teststep = (double)(0); state->n = n; state->m = m; state->algomode = 1; state->hasf = ae_true; state->hasfi = ae_false; state->hasg = ae_false; /* * init 2 */ minlm_lmprepare(n, m, ae_true, state, _state); minlmsetacctype(state, 0, _state); minlmsetcond(state, (double)(0), 0, _state); minlmsetxrep(state, ae_false, _state); minlmsetstpmax(state, (double)(0), _state); minlmrestartfrom(state, x, _state); } /************************************************************************* Prepare internal structures (except for RComm). Note: M must be zero for FGH mode, non-zero for V/VJ/FJ/FGJ mode.
*************************************************************************/ static void minlm_lmprepare(ae_int_t n, ae_int_t m, ae_bool havegrad, minlmstate* state, ae_state *_state) { ae_int_t i; smoothnessmonitorinit(&state->smonitor, 0, 0, ae_false, _state); if( n<=0||m<0 ) { return; } if( havegrad ) { ae_vector_set_length(&state->g, n, _state); } if( m!=0 ) { ae_matrix_set_length(&state->j, m, n, _state); ae_vector_set_length(&state->fi, m, _state); ae_vector_set_length(&state->fibase, m, _state); ae_vector_set_length(&state->deltaf, m, _state); ae_vector_set_length(&state->fm1, m, _state); ae_vector_set_length(&state->fp1, m, _state); ae_vector_set_length(&state->fc1, m, _state); ae_vector_set_length(&state->gm1, m, _state); ae_vector_set_length(&state->gp1, m, _state); ae_vector_set_length(&state->gc1, m, _state); } else { ae_matrix_set_length(&state->h, n, n, _state); } ae_vector_set_length(&state->x, n, _state); ae_vector_set_length(&state->deltax, n, _state); ae_matrix_set_length(&state->quadraticmodel, n, n, _state); ae_vector_set_length(&state->xbase, n, _state); ae_vector_set_length(&state->gbase, n, _state); ae_vector_set_length(&state->xdir, n, _state); ae_vector_set_length(&state->tmp0, n, _state); /* * prepare internal L-BFGS */ for(i=0; i<=n-1; i++) { state->x.ptr.p_double[i] = (double)(0); } minlbfgscreate(n, ae_minint(minlm_additers, n, _state), &state->x, &state->internalstate, _state); minlbfgssetcond(&state->internalstate, 0.0, 0.0, 0.0, ae_minint(minlm_additers, n, _state), _state); /* * Prepare internal QP solver */ minqpcreate(n, &state->qpstate, _state); minqpsetalgoquickqp(&state->qpstate, 0.0, 0.0, coalesce(0.01*state->epsx, 1.0E-12, _state), 10, ae_true, _state); /* * Prepare boundary constraints */ ae_vector_set_length(&state->bndl, n, _state); ae_vector_set_length(&state->bndu, n, _state); ae_vector_set_length(&state->havebndl, n, _state); ae_vector_set_length(&state->havebndu, n, _state); for(i=0; i<=n-1; i++) { state->bndl.ptr.p_double[i] = _state->v_neginf; state->havebndl.ptr.p_bool[i] = ae_false; state->bndu.ptr.p_double[i] = _state->v_posinf; state->havebndu.ptr.p_bool[i] = ae_false; } /* * Prepare scaling matrix */ ae_vector_set_length(&state->s, n, _state); ae_vector_set_length(&state->lastscaleused, n, _state); for(i=0; i<=n-1; i++) { state->s.ptr.p_double[i] = 1.0; state->lastscaleused.ptr.p_double[i] = 1.0; } /* * Prepare linear constraints */ state->nec = 0; state->nic = 0; } /************************************************************************* Clears request fileds (to be sure that we don't forgot to clear something) *************************************************************************/ static void minlm_clearrequestfields(minlmstate* state, ae_state *_state) { state->needf = ae_false; state->needfg = ae_false; state->needfgh = ae_false; state->needfij = ae_false; state->needfi = ae_false; state->xupdated = ae_false; } /************************************************************************* Increases lambda, returns False when there is a danger of overflow *************************************************************************/ static ae_bool minlm_increaselambda(double* lambdav, double* nu, ae_state *_state) { double lnlambda; double lnnu; double lnlambdaup; double lnmax; ae_bool result; result = ae_false; lnlambda = ae_log(*lambdav, _state); lnlambdaup = ae_log(minlm_lambdaup, _state); lnnu = ae_log(*nu, _state); lnmax = ae_log(ae_maxrealnumber, _state); if( ae_fp_greater(lnlambda+lnlambdaup+lnnu,0.25*lnmax) ) { return result; } if( 
ae_fp_greater(lnnu+ae_log((double)(2), _state),lnmax) ) { return result; } *lambdav = *lambdav*minlm_lambdaup*(*nu); *nu = *nu*2; result = ae_true; return result; } /************************************************************************* Decreases lambda, but leaves it unchanged when there is danger of underflow. *************************************************************************/ static void minlm_decreaselambda(double* lambdav, double* nu, ae_state *_state) { *nu = (double)(1); if( ae_fp_less(ae_log(*lambdav, _state)+ae_log(minlm_lambdadown, _state),ae_log(ae_minrealnumber, _state)) ) { *lambdav = ae_minrealnumber; } else { *lambdav = *lambdav*minlm_lambdadown; } } /************************************************************************* This function compares actual decrease vs predicted decrease and updates LambdaV/Nu accordingly. INPUT PARAMETERS: QuadraticModel - array[N,N], full Hessian matrix of quadratic model at deltaX=0 GBase - array[N], gradient at deltaX=0 FBase - F(deltaX=0) N - size DeltaX - step vector FNew - new function value LambdaV - lambda-value, updated on exit Nu - Nu-multiplier, updated on exit On exit it returns: * Result=0 - if we have to continue iterations * Result<>0 - if termination with completion code Result is requested -- ALGLIB -- Copyright 17.02.2017 by Bochkanov Sergey *************************************************************************/ static ae_int_t minlm_checkdecrease(/* Real */ ae_matrix* quadraticmodel, /* Real */ ae_vector* gbase, double fbase, ae_int_t n, /* Real */ ae_vector* deltax, double fnew, double* lambdav, double* nu, ae_state *_state) { ae_int_t i; double v; double t; double predicteddecrease; double actualdecrease; ae_int_t result; result = 0; t = (double)(0); for(i=0; i<=n-1; i++) { v = ae_v_dotproduct(&quadraticmodel->ptr.pp_double[i][0], 1, &deltax->ptr.p_double[0], 1, ae_v_len(0,n-1)); t = t+deltax->ptr.p_double[i]*gbase->ptr.p_double[i]+0.5*deltax->ptr.p_double[i]*v; } predicteddecrease = -t; actualdecrease = -(fnew-fbase); if( ae_fp_less_eq(predicteddecrease,(double)(0)) ) { result = 7; return result; } v = actualdecrease/predicteddecrease; if( ae_fp_less(v,0.1) ) { if( !minlm_increaselambda(lambdav, nu, _state) ) { /* * Lambda is too large, we have to break iterations. */ result = 7; return result; } } if( ae_fp_greater(v,0.5) ) { minlm_decreaselambda(lambdav, nu, _state); } return result; } /************************************************************************* This function initializes step finder object with problem statement; model parameters specified during this call should not (and can not) change during object lifetime (although it is possible to re-initialize object with different settings). This function reuses internally allocated objects as much as possible. In addition to initializing step finder, this function enforces feasibility in initial point X passed to this function. It is important that LM iteration starts from feasible point and performs feasible steps; RETURN VALUE: True for successful initialization False for inconsistent constraints; you should not use step finder if it returned False. 
*************************************************************************/ static ae_bool minlm_minlmstepfinderinit(minlmstepfinder* state, ae_int_t n, ae_int_t m, ae_int_t maxmodelage, ae_bool hasfi, /* Real */ ae_vector* xbase, /* Real */ ae_vector* bndl, /* Real */ ae_vector* bndu, /* Real */ ae_matrix* cleic, ae_int_t nec, ae_int_t nic, /* Real */ ae_vector* s, double stpmax, double epsx, ae_state *_state) { ae_int_t i; ae_bool result; state->n = n; state->m = m; state->maxmodelage = maxmodelage; state->hasfi = hasfi; state->stpmax = stpmax; state->epsx = epsx; /* * Allocate temporaries, create QP solver, select QP algorithm */ rvectorsetlengthatleast(&state->bndl, n, _state); rvectorsetlengthatleast(&state->bndu, n, _state); rvectorsetlengthatleast(&state->s, n, _state); bvectorsetlengthatleast(&state->havebndl, n, _state); bvectorsetlengthatleast(&state->havebndu, n, _state); rvectorsetlengthatleast(&state->x, n, _state); rvectorsetlengthatleast(&state->xbase, n, _state); rvectorsetlengthatleast(&state->tmp0, n, _state); rvectorsetlengthatleast(&state->modeldiag, n, _state); ivectorsetlengthatleast(&state->tmpct, nec+nic, _state); rvectorsetlengthatleast(&state->xdir, n, _state); if( hasfi ) { rvectorsetlengthatleast(&state->fi, m, _state); rvectorsetlengthatleast(&state->fibase, m, _state); } for(i=0; i<=n-1; i++) { ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinLM: integrity check failed", _state); ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinLM: integrity check failed", _state); state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); state->s.ptr.p_double[i] = s->ptr.p_double[i]; } for(i=0; i<=nec-1; i++) { state->tmpct.ptr.p_int[i] = 0; } for(i=0; i<=nic-1; i++) { state->tmpct.ptr.p_int[nec+i] = -1; } minqpcreate(n, &state->qpstate, _state); if( nec+nic==0 ) { minqpsetalgoquickqp(&state->qpstate, 0.0, 0.0, coalesce(0.01*epsx, 1.0E-12, _state), 10, ae_true, _state); } else { minqpsetalgodenseaul(&state->qpstate, coalesce(0.01*epsx, 1.0E-12, _state), (double)(100), 10, _state); } minqpsetbc(&state->qpstate, bndl, bndu, _state); minqpsetlc(&state->qpstate, cleic, &state->tmpct, nec+nic, _state); minqpsetscale(&state->qpstate, s, _state); /* * Check feasibility of constraints: * * check/enforce box constraints (straightforward) * * prepare QP subproblem which return us a feasible point */ result = ae_true; for(i=0; i<=n-1; i++) { if( (state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i])&&ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { result = ae_false; return result; } if( state->havebndl.ptr.p_bool[i]&&ae_fp_less(xbase->ptr.p_double[i],state->bndl.ptr.p_double[i]) ) { xbase->ptr.p_double[i] = state->bndl.ptr.p_double[i]; } if( state->havebndu.ptr.p_bool[i]&&ae_fp_greater(xbase->ptr.p_double[i],state->bndu.ptr.p_double[i]) ) { xbase->ptr.p_double[i] = state->bndu.ptr.p_double[i]; } } if( nec+nic>0 ) { /* * Well, we have linear constraints... let's use heavy machinery. * * We will modify QP solver state below, but everything will be * restored in MinLMStepFinderStart(). 
*/ sparsecreate(n, n, n, &state->tmpsp, _state); for(i=0; i<=n-1; i++) { sparseset(&state->tmpsp, i, i, 0.5, _state); state->tmp0.ptr.p_double[i] = (double)(0); } minqpsetstartingpointfast(&state->qpstate, xbase, _state); minqpsetoriginfast(&state->qpstate, xbase, _state); minqpsetlineartermfast(&state->qpstate, &state->tmp0, _state); minqpsetquadratictermsparse(&state->qpstate, &state->tmpsp, ae_true, _state); minqpoptimize(&state->qpstate, _state); minqpresultsbuf(&state->qpstate, xbase, &state->qprep, _state); } return result; } /************************************************************************* This function prepares LM step search session. *************************************************************************/ static void minlm_minlmstepfinderstart(minlmstepfinder* state, /* Real */ ae_matrix* quadraticmodel, /* Real */ ae_vector* gbase, double fbase, /* Real */ ae_vector* xbase, /* Real */ ae_vector* fibase, ae_int_t modelage, ae_state *_state) { ae_int_t i; ae_int_t n; n = state->n; ae_vector_set_length(&state->rstate.ia, 2+1, _state); ae_vector_set_length(&state->rstate.ba, 0+1, _state); ae_vector_set_length(&state->rstate.ra, 0+1, _state); state->rstate.stage = -1; state->modelage = modelage; state->fbase = fbase; if( state->hasfi ) { for(i=0; i<=state->m-1; i++) { state->fibase.ptr.p_double[i] = fibase->ptr.p_double[i]; } } for(i=0; i<=n-1; i++) { state->xbase.ptr.p_double[i] = xbase->ptr.p_double[i]; state->modeldiag.ptr.p_double[i] = quadraticmodel->ptr.pp_double[i][i]; } minqpsetstartingpointfast(&state->qpstate, xbase, _state); minqpsetoriginfast(&state->qpstate, xbase, _state); minqpsetlineartermfast(&state->qpstate, gbase, _state); minqpsetquadratictermfast(&state->qpstate, quadraticmodel, ae_true, 0.0, _state); } /************************************************************************* This function runs LM step search session. // // Find value of Levenberg-Marquardt damping parameter which: // * leads to positive definite damped model // * within bounds specified by StpMax // * generates step which decreases function value // // After this block IFlag is set to: // * -8, if infinities/NANs were detected in function values/gradient // * -3, if constraints are infeasible // * -2, if model update is needed (either Lambda growth is too large // or step is too short, but we can't rely on model and stop iterations) // * -1, if model is fresh, Lambda have grown too large, termination is needed // * 0, if everything is OK, continue iterations // * >0 - successful completion (step size is small enough) // // State.Nu can have any value on enter, but after exit it is set to 1.0 // *************************************************************************/ static ae_bool minlm_minlmstepfinderiteration(minlmstepfinder* state, double* lambdav, double* nu, /* Real */ ae_vector* xnew, /* Real */ ae_vector* deltax, ae_bool* deltaxready, /* Real */ ae_vector* deltaf, ae_bool* deltafready, ae_int_t* iflag, double* fnew, ae_int_t* ncholesky, ae_state *_state) { ae_int_t i; ae_bool bflag; double v; ae_int_t n; ae_int_t m; ae_bool result; /* * Reverse communication preparations * I know it looks ugly, but it works the same way * anywhere from C++ to Python. 
* * This code initializes locals by: * * random values determined during code * generation - on first subroutine call * * values from previous call - on subsequent calls */ if( state->rstate.stage>=0 ) { i = state->rstate.ia.ptr.p_int[0]; n = state->rstate.ia.ptr.p_int[1]; m = state->rstate.ia.ptr.p_int[2]; bflag = state->rstate.ba.ptr.p_bool[0]; v = state->rstate.ra.ptr.p_double[0]; } else { i = -838; n = 939; m = -526; bflag = ae_true; v = -541; } if( state->rstate.stage==0 ) { goto lbl_0; } if( state->rstate.stage==1 ) { goto lbl_1; } /* * Routine body */ *iflag = -99; n = state->n; m = state->m; lbl_2: if( ae_false ) { goto lbl_3; } *deltaxready = ae_false; *deltafready = ae_false; /* * Do we need model update? */ if( state->modelage>0&&ae_fp_greater_eq(*nu,minlm_suspiciousnu) ) { *iflag = -2; goto lbl_3; } /* * Setup quadratic solver and solve quadratic programming problem. * After problem is solved we'll try to bound step by StpMax * (Lambda will be increased if step size is too large). * * We use BFlag variable to indicate that we have to increase Lambda. * If it is False, we will try to increase Lambda and move to new iteration. */ bflag = ae_true; for(i=0; i<=n-1; i++) { state->tmp0.ptr.p_double[i] = state->modeldiag.ptr.p_double[i]+*lambdav/ae_sqr(state->s.ptr.p_double[i], _state); } minqprewritediagonal(&state->qpstate, &state->tmp0, _state); minqpoptimize(&state->qpstate, _state); minqpresultsbuf(&state->qpstate, xnew, &state->qprep, _state); *ncholesky = *ncholesky+state->qprep.ncholesky; if( state->qprep.terminationtype==-3 ) { /* * Infeasible constraints */ *iflag = -3; goto lbl_3; } if( state->qprep.terminationtype==-4||state->qprep.terminationtype==-5 ) { /* * Unconstrained direction of negative curvature was detected */ if( !minlm_increaselambda(lambdav, nu, _state) ) { *iflag = -1; goto lbl_3; } goto lbl_2; } ae_assert(state->qprep.terminationtype>0, "MinLM: unexpected completion code from QP solver", _state); ae_v_move(&state->xdir.ptr.p_double[0], 1, &xnew->ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_sub(&state->xdir.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(state->xdir.ptr.p_double[i]/state->s.ptr.p_double[i], _state); } if( ae_isfinite(v, _state) ) { v = ae_sqrt(v, _state); if( ae_fp_greater(state->stpmax,(double)(0))&&ae_fp_greater(v,state->stpmax) ) { bflag = ae_false; } } else { bflag = ae_false; } if( !bflag ) { /* * Solution failed: * try to increase lambda to make matrix positive definite and continue. */ if( !minlm_increaselambda(lambdav, nu, _state) ) { *iflag = -1; goto lbl_3; } goto lbl_2; } /* * Step in State.XDir and it is bounded by StpMax. * * We should check stopping conditions on step size here. * DeltaX, which is used for secant updates, is initialized here. * * This code is a bit tricky because sometimes XDir<>0, but * it is so small that XDir+XBase==XBase (in finite precision * arithmetics). So we set DeltaX to XBase, then * add XDir, and then subtract XBase to get exact value of * DeltaX. * * Step length is estimated using DeltaX. 
* * NOTE: stopping conditions are tested * for fresh models only (ModelAge=0) */ ae_v_move(&deltax->ptr.p_double[0], 1, &xnew->ptr.p_double[0], 1, ae_v_len(0,n-1)); ae_v_sub(&deltax->ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); *deltaxready = ae_true; v = 0.0; for(i=0; i<=n-1; i++) { v = v+ae_sqr(deltax->ptr.p_double[i]/state->s.ptr.p_double[i], _state); } v = ae_sqrt(v, _state); if( ae_fp_less_eq(v,state->epsx) ) { if( state->modelage==0 ) { /* * Step is too short, model is fresh and we can rely on it. * Terminating. */ *iflag = 2; goto lbl_3; } else { /* * Step is suspiciously short, but model is not fresh * and we can't rely on it. */ *iflag = -2; goto lbl_3; } } /* * Let's evaluate new step: * a) if we have Fi vector, we evaluate it using rcomm, and * then we manually calculate State.F as sum of squares of Fi[] * b) if we have F value, we just evaluate it through rcomm interface * * We prefer (a) because we may need Fi vector for additional * iterations */ ae_v_move(&state->x.ptr.p_double[0], 1, &xnew->ptr.p_double[0], 1, ae_v_len(0,n-1)); state->needf = ae_false; state->needfi = ae_false; if( !state->hasfi ) { goto lbl_4; } state->needfi = ae_true; state->rstate.stage = 0; goto lbl_rcomm; lbl_0: state->needfi = ae_false; v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); *fnew = v; ae_v_move(&deltaf->ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); ae_v_sub(&deltaf->ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); *deltafready = ae_true; goto lbl_5; lbl_4: state->needf = ae_true; state->rstate.stage = 1; goto lbl_rcomm; lbl_1: state->needf = ae_false; *fnew = state->f; lbl_5: if( !ae_isfinite(*fnew, _state) ) { /* * Integrity check failed, break! */ *iflag = -8; goto lbl_3; } if( ae_fp_greater_eq(*fnew,state->fbase) ) { /* * Increase lambda and continue */ if( !minlm_increaselambda(lambdav, nu, _state) ) { *iflag = -1; goto lbl_3; } goto lbl_2; } /* * We've found our step! 
     */
    *iflag = 0;
    goto lbl_3;
    goto lbl_2;
lbl_3:
    *nu = (double)(1);
    ae_assert(((*iflag>=-3&&*iflag<=0)||*iflag==-8)||*iflag>0, "MinLM: internal integrity check failed!", _state);
    result = ae_false;
    return result;
    
    /*
     * Saving state
     */
lbl_rcomm:
    result = ae_true;
    state->rstate.ia.ptr.p_int[0] = i;
    state->rstate.ia.ptr.p_int[1] = n;
    state->rstate.ia.ptr.p_int[2] = m;
    state->rstate.ba.ptr.p_bool[0] = bflag;
    state->rstate.ra.ptr.p_double[0] = v;
    return result;
}


void _minlmstepfinder_init(void* _p, ae_state *_state, ae_bool make_automatic)
{
    minlmstepfinder *p = (minlmstepfinder*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->modeldiag, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fibase, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic);
    _rcommstate_init(&p->rstate, _state, make_automatic);
    ae_vector_init(&p->xdir, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->choleskybuf, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->tmpct, 0, DT_INT, _state, make_automatic);
    _minqpstate_init(&p->qpstate, _state, make_automatic);
    _minqpreport_init(&p->qprep, _state, make_automatic);
    _sparsematrix_init(&p->tmpsp, _state, make_automatic);
}


void _minlmstepfinder_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic)
{
    minlmstepfinder *dst = (minlmstepfinder*)_dst;
    minlmstepfinder *src = (minlmstepfinder*)_src;
    dst->n = src->n;
    dst->m = src->m;
    dst->stpmax = src->stpmax;
    dst->modelage = src->modelage;
    dst->maxmodelage = src->maxmodelage;
    dst->hasfi = src->hasfi;
    dst->epsx = src->epsx;
    ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic);
    dst->f = src->f;
    ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic);
    dst->needf = src->needf;
    dst->needfi = src->needfi;
    dst->fbase = src->fbase;
    ae_vector_init_copy(&dst->modeldiag, &src->modeldiag, _state, make_automatic);
    ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic);
    ae_vector_init_copy(&dst->fibase, &src->fibase, _state, make_automatic);
    ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic);
    ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic);
    ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic);
    ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic);
    ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic);
    _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic);
    ae_vector_init_copy(&dst->xdir, &src->xdir, _state, make_automatic);
    ae_vector_init_copy(&dst->choleskybuf, &src->choleskybuf, _state, make_automatic);
    ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic);
    ae_vector_init_copy(&dst->tmpct, &src->tmpct, _state, make_automatic);
    dst->actualdecrease = src->actualdecrease;
    dst->predicteddecrease = src->predicteddecrease;
    _minqpstate_init_copy(&dst->qpstate, &src->qpstate, _state, make_automatic);
    _minqpreport_init_copy(&dst->qprep, &src->qprep, _state, make_automatic);
    _sparsematrix_init_copy(&dst->tmpsp, &src->tmpsp, _state, make_automatic);
}


void _minlmstepfinder_clear(void* _p)
{
    minlmstepfinder *p = (minlmstepfinder*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_clear(&p->x);
    ae_vector_clear(&p->fi);
    ae_vector_clear(&p->modeldiag);
    ae_vector_clear(&p->xbase);
    ae_vector_clear(&p->fibase);
    ae_vector_clear(&p->bndl);
    ae_vector_clear(&p->bndu);
    ae_vector_clear(&p->havebndl);
    ae_vector_clear(&p->havebndu);
    ae_vector_clear(&p->s);
    _rcommstate_clear(&p->rstate);
    ae_vector_clear(&p->xdir);
    ae_vector_clear(&p->choleskybuf);
    ae_vector_clear(&p->tmp0);
    ae_vector_clear(&p->tmpct);
    _minqpstate_clear(&p->qpstate);
    _minqpreport_clear(&p->qprep);
    _sparsematrix_clear(&p->tmpsp);
}


void _minlmstepfinder_destroy(void* _p)
{
    minlmstepfinder *p = (minlmstepfinder*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_destroy(&p->x);
    ae_vector_destroy(&p->fi);
    ae_vector_destroy(&p->modeldiag);
    ae_vector_destroy(&p->xbase);
    ae_vector_destroy(&p->fibase);
    ae_vector_destroy(&p->bndl);
    ae_vector_destroy(&p->bndu);
    ae_vector_destroy(&p->havebndl);
    ae_vector_destroy(&p->havebndu);
    ae_vector_destroy(&p->s);
    _rcommstate_destroy(&p->rstate);
    ae_vector_destroy(&p->xdir);
    ae_vector_destroy(&p->choleskybuf);
    ae_vector_destroy(&p->tmp0);
    ae_vector_destroy(&p->tmpct);
    _minqpstate_destroy(&p->qpstate);
    _minqpreport_destroy(&p->qprep);
    _sparsematrix_destroy(&p->tmpsp);
}


void _minlmstate_init(void* _p, ae_state *_state, ae_bool make_automatic)
{
    minlmstate *p = (minlmstate*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic);
    ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic);
    ae_matrix_init(&p->h, 0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fibase, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->gbase, 0, DT_REAL, _state, make_automatic);
    ae_matrix_init(&p->quadraticmodel, 0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic);
    ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic);
    ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xnew, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->xdir, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->deltax, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->deltaf, 0, DT_REAL, _state, make_automatic);
    _smoothnessmonitor_init(&p->smonitor, _state, make_automatic);
    ae_vector_init(&p->lastscaleused, 0, DT_REAL, _state, make_automatic);
    _rcommstate_init(&p->rstate, _state, make_automatic);
    ae_vector_init(&p->choleskybuf, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fm1, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fp1, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->fc1, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->gm1, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->gp1, 0, DT_REAL, _state, make_automatic);
    ae_vector_init(&p->gc1, 0, DT_REAL, _state, make_automatic);
    _minlbfgsstate_init(&p->internalstate, _state, make_automatic);
    _minlbfgsreport_init(&p->internalrep, _state, make_automatic);
    _minqpstate_init(&p->qpstate, _state, make_automatic);
    _minqpreport_init(&p->qprep, _state, make_automatic);
    _minlmstepfinder_init(&p->finderstate, _state, make_automatic);
}


void _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic)
{
    minlmstate *dst = (minlmstate*)_dst;
    minlmstate *src = (minlmstate*)_src;
    dst->n = src->n;
    dst->m = src->m;
    dst->diffstep = src->diffstep;
    dst->epsx = src->epsx;
    dst->maxits = src->maxits;
    dst->xrep = src->xrep;
    dst->stpmax = src->stpmax;
    dst->maxmodelage = src->maxmodelage;
    dst->makeadditers = src->makeadditers;
    ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic);
    dst->f = src->f;
    ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic);
    ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic);
    ae_matrix_init_copy(&dst->h, &src->h, _state, make_automatic);
    ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic);
    dst->needf = src->needf;
    dst->needfg = src->needfg;
    dst->needfgh = src->needfgh;
    dst->needfij = src->needfij;
    dst->needfi = src->needfi;
    dst->xupdated = src->xupdated;
    dst->userterminationneeded = src->userterminationneeded;
    dst->algomode = src->algomode;
    dst->hasf = src->hasf;
    dst->hasfi = src->hasfi;
    dst->hasg = src->hasg;
    ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic);
    dst->fbase = src->fbase;
    ae_vector_init_copy(&dst->fibase, &src->fibase, _state, make_automatic);
    ae_vector_init_copy(&dst->gbase, &src->gbase, _state, make_automatic);
    ae_matrix_init_copy(&dst->quadraticmodel, &src->quadraticmodel, _state, make_automatic);
    ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic);
    ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic);
    ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic);
    ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic);
    ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic);
    ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic);
    dst->nec = src->nec;
    dst->nic = src->nic;
    dst->lambdav = src->lambdav;
    dst->nu = src->nu;
    dst->modelage = src->modelage;
    ae_vector_init_copy(&dst->xnew, &src->xnew, _state, make_automatic);
    ae_vector_init_copy(&dst->xdir, &src->xdir, _state, make_automatic);
    ae_vector_init_copy(&dst->deltax, &src->deltax, _state, make_automatic);
    ae_vector_init_copy(&dst->deltaf, &src->deltaf, _state, make_automatic);
    dst->deltaxready = src->deltaxready;
    dst->deltafready = src->deltafready;
    _smoothnessmonitor_init_copy(&dst->smonitor, &src->smonitor, _state, make_automatic);
    dst->teststep = src->teststep;
    ae_vector_init_copy(&dst->lastscaleused, &src->lastscaleused, _state, make_automatic);
    dst->repiterationscount = src->repiterationscount;
    dst->repterminationtype = src->repterminationtype;
    dst->repnfunc = src->repnfunc;
    dst->repnjac = src->repnjac;
    dst->repngrad = src->repngrad;
    dst->repnhess = src->repnhess;
    dst->repncholesky = src->repncholesky;
    _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic);
    ae_vector_init_copy(&dst->choleskybuf, &src->choleskybuf, _state, make_automatic);
    ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic);
    dst->actualdecrease = src->actualdecrease;
    dst->predicteddecrease = src->predicteddecrease;
    dst->xm1 = src->xm1;
    dst->xp1 = src->xp1;
    ae_vector_init_copy(&dst->fm1, &src->fm1, _state, make_automatic);
    ae_vector_init_copy(&dst->fp1, &src->fp1, _state, make_automatic);
    ae_vector_init_copy(&dst->fc1, &src->fc1, _state, make_automatic);
    ae_vector_init_copy(&dst->gm1, &src->gm1, _state, make_automatic);
    ae_vector_init_copy(&dst->gp1, &src->gp1, _state, make_automatic);
    ae_vector_init_copy(&dst->gc1, &src->gc1, _state, make_automatic);
    _minlbfgsstate_init_copy(&dst->internalstate, &src->internalstate, _state, make_automatic);
    _minlbfgsreport_init_copy(&dst->internalrep, &src->internalrep, _state, make_automatic);
    _minqpstate_init_copy(&dst->qpstate, &src->qpstate, _state, make_automatic);
    _minqpreport_init_copy(&dst->qprep, &src->qprep, _state, make_automatic);
    _minlmstepfinder_init_copy(&dst->finderstate, &src->finderstate, _state, make_automatic);
}


void _minlmstate_clear(void* _p)
{
    minlmstate *p = (minlmstate*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_clear(&p->x);
    ae_vector_clear(&p->fi);
    ae_matrix_clear(&p->j);
    ae_matrix_clear(&p->h);
    ae_vector_clear(&p->g);
    ae_vector_clear(&p->xbase);
    ae_vector_clear(&p->fibase);
    ae_vector_clear(&p->gbase);
    ae_matrix_clear(&p->quadraticmodel);
    ae_vector_clear(&p->bndl);
    ae_vector_clear(&p->bndu);
    ae_vector_clear(&p->havebndl);
    ae_vector_clear(&p->havebndu);
    ae_vector_clear(&p->s);
    ae_matrix_clear(&p->cleic);
    ae_vector_clear(&p->xnew);
    ae_vector_clear(&p->xdir);
    ae_vector_clear(&p->deltax);
    ae_vector_clear(&p->deltaf);
    _smoothnessmonitor_clear(&p->smonitor);
    ae_vector_clear(&p->lastscaleused);
    _rcommstate_clear(&p->rstate);
    ae_vector_clear(&p->choleskybuf);
    ae_vector_clear(&p->tmp0);
    ae_vector_clear(&p->fm1);
    ae_vector_clear(&p->fp1);
    ae_vector_clear(&p->fc1);
    ae_vector_clear(&p->gm1);
    ae_vector_clear(&p->gp1);
    ae_vector_clear(&p->gc1);
    _minlbfgsstate_clear(&p->internalstate);
    _minlbfgsreport_clear(&p->internalrep);
    _minqpstate_clear(&p->qpstate);
    _minqpreport_clear(&p->qprep);
    _minlmstepfinder_clear(&p->finderstate);
}


void _minlmstate_destroy(void* _p)
{
    minlmstate *p = (minlmstate*)_p;
    ae_touch_ptr((void*)p);
    ae_vector_destroy(&p->x);
    ae_vector_destroy(&p->fi);
    ae_matrix_destroy(&p->j);
    ae_matrix_destroy(&p->h);
    ae_vector_destroy(&p->g);
    ae_vector_destroy(&p->xbase);
    ae_vector_destroy(&p->fibase);
    ae_vector_destroy(&p->gbase);
    ae_matrix_destroy(&p->quadraticmodel);
    ae_vector_destroy(&p->bndl);
    ae_vector_destroy(&p->bndu);
    ae_vector_destroy(&p->havebndl);
    ae_vector_destroy(&p->havebndu);
    ae_vector_destroy(&p->s);
    ae_matrix_destroy(&p->cleic);
    ae_vector_destroy(&p->xnew);
    ae_vector_destroy(&p->xdir);
    ae_vector_destroy(&p->deltax);
    ae_vector_destroy(&p->deltaf);
    _smoothnessmonitor_destroy(&p->smonitor);
    ae_vector_destroy(&p->lastscaleused);
    _rcommstate_destroy(&p->rstate);
    ae_vector_destroy(&p->choleskybuf);
    ae_vector_destroy(&p->tmp0);
    ae_vector_destroy(&p->fm1);
    ae_vector_destroy(&p->fp1);
    ae_vector_destroy(&p->fc1);
    ae_vector_destroy(&p->gm1);
    ae_vector_destroy(&p->gp1);
    ae_vector_destroy(&p->gc1);
    _minlbfgsstate_destroy(&p->internalstate);
    _minlbfgsreport_destroy(&p->internalrep);
    _minqpstate_destroy(&p->qpstate);
    _minqpreport_destroy(&p->qprep);
    _minlmstepfinder_destroy(&p->finderstate);
}


void _minlmreport_init(void* _p, ae_state *_state, ae_bool make_automatic)
{
    minlmreport *p = (minlmreport*)_p;
    ae_touch_ptr((void*)p);
}


void _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic)
{
    minlmreport *dst = (minlmreport*)_dst;
    minlmreport *src = (minlmreport*)_src;
    dst->iterationscount = src->iterationscount;
    dst->terminationtype = src->terminationtype;
    dst->nfunc = src->nfunc;
    dst->njac = src->njac;
    dst->ngrad = src->ngrad;
    dst->nhess = src->nhess;
    dst->ncholesky = src->ncholesky;
}


void _minlmreport_clear(void* _p)
{
    minlmreport *p = (minlmreport*)_p;
    ae_touch_ptr((void*)p);
}


void _minlmreport_destroy(void* _p)
{
    minlmreport *p = (minlmreport*)_p;
    ae_touch_ptr((void*)p);
}


#endif
}
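/*************************************************************************
Note on the lbl_rcomm block at the top of this fragment.

MinLM, like the other ALGLIB solvers, is written as a reverse-communication
state machine: whenever it needs a function value or Jacobian it saves its
local variables (here i, n, m, bflag, v) into state->rstate, returns
ae_true to the caller, and later resumes from the saved point; ae_false
means that iterations are finished. The skeleton below, compiled out with
#if 0, shows the general shape of such a routine; "somestate", "lbl_0" and
the saved locals are hypothetical placeholders, not ALGLIB identifiers.
*************************************************************************/
#if 0
ae_bool someroutine_iteration(somestate* state, ae_state *_state)
{
    ae_int_t i;
    double v;
    ae_bool result;
    
    /*
     * On re-entry restore locals saved at the last suspension point;
     * on the very first call (stage==-1) give them initial values.
     */
    if( state->rstate.stage>=0 )
    {
        i = state->rstate.ia.ptr.p_int[0];
        v = state->rstate.ra.ptr.p_double[0];
    }
    else
    {
        i = 0;
        v = 0.0;
    }
    if( state->rstate.stage==0 )
    {
        goto lbl_0;
    }
    
    /*
     * Body of the algorithm: request a target value from the caller
     * by recording the resume point and suspending at lbl_rcomm.
     */
    state->rstate.stage = 0;
    goto lbl_rcomm;
lbl_0:
    
    /*
     * ... continue with the value supplied by the caller ...
     */
    result = ae_false;
    return result;
    
    /*
     * Saving state
     */
lbl_rcomm:
    result = ae_true;
    state->rstate.ia.ptr.p_int[0] = i;
    state->rstate.ra.ptr.p_double[0] = v;
    return result;
}
#endif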
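/*************************************************************************
Note on the structure lifecycle functions above.

Every ALGLIB structure (minlmstepfinder, minlmstate, minlmreport, ...) is
accompanied by four helpers: _xxx_init() prepares an empty, correctly
initialized instance (allocating descriptors for ae_vector, ae_matrix and
nested-structure members), _xxx_init_copy() performs a deep member-by-
member copy, _xxx_clear() releases the dynamically allocated members, and
_xxx_destroy() performs the final member-by-member destruction.

The sketch below, compiled out with #if 0, illustrates how this quadruple
can be driven by hand. It is only an illustration: it assumes the
ae_state_init() and ae_state_clear() helpers from the ALGLIB core and
passes make_automatic=ae_false, i.e. no frame-based automatic cleanup.
*************************************************************************/
#if 0
static void minlmstepfinder_lifecycle_sketch()
{
    ae_state _state;
    minlmstepfinder finder;
    minlmstepfinder findercopy;
    
    ae_state_init(&_state);
    
    /* allocate an empty, correctly initialized structure */
    _minlmstepfinder_init(&finder, &_state, ae_false);
    
    /* ... the structure is filled and used by the MinLM solver here ... */
    
    /* deep copy: every vector, matrix and nested structure is duplicated */
    _minlmstepfinder_init_copy(&findercopy, &finder, &_state, ae_false);
    
    /*
     * Release dynamically allocated members, then finalize both objects;
     * the _clear and _destroy bodies above mirror each other member-by-member.
     */
    _minlmstepfinder_clear(&findercopy);
    _minlmstepfinder_destroy(&findercopy);
    _minlmstepfinder_clear(&finder);
    _minlmstepfinder_destroy(&finder);
    
    ae_state_clear(&_state);
}
#endif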
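/*************************************************************************
Note on the counters copied by _minlmreport_init_copy().

The repiterationscount, repnfunc, repnjac and related fields accumulated
in minlmstate are surfaced to the user through the minlmreport structure.
The sketch below, compiled out with #if 0, shows how these counters are
typically read through the public C++ interface after a MinLM run. It is
a hedged usage sketch with signatures as published for the ALGLIB 3.16
C++ interface; function1_fvec is a user-supplied callback and is not part
of this file.
*************************************************************************/
#if 0
#include <math.h>
#include <stdio.h>

static void function1_fvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
{
    /* two residuals: f0 = 10*(x0+3)^2, f1 = (x1-3)^2 */
    fi[0] = 10*pow(x[0]+3, 2);
    fi[1] = pow(x[1]-3, 2);
}

static void minlm_report_sketch()
{
    alglib::real_1d_array x = "[0,0]";
    alglib::minlmstate state;
    alglib::minlmreport rep;
    
    alglib::minlmcreatev(2, x, 0.0001, state);   /* 2 residuals, numerical Jacobian      */
    alglib::minlmsetcond(state, 0.000001, 0);    /* epsx criterion, unlimited iterations */
    alglib::minlmoptimize(state, function1_fvec);
    alglib::minlmresults(state, x, rep);
    
    /* the counters below are exactly the fields copied by _minlmreport_init_copy() */
    printf("terminationtype=%d iterations=%d nfunc=%d njac=%d\n",
        (int)rep.terminationtype, (int)rep.iterationscount,
        (int)rep.nfunc, (int)rep.njac);
}
#endif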