# TNC Python interface
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $

# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org)

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:

# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""
TNC: A Python interface to the TNC non-linear optimizer

TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates at
which to evaluate the function. It must return either a tuple, whose first
element is the value of the function and whose second element is the
gradient of the function (as a list of values), or None, to abort the
minimization.
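
For example (an illustrative sketch; ``objective`` is a name made up here,
and ``fmin_tnc`` is the public wrapper defined below)::

    from scipy.optimize import fmin_tnc

    def objective(x):
        f = x[0] ** 2 + x[1] ** 2        # function value
        g = [2.0 * x[0], 2.0 * x[1]]     # gradient as a list of floats
        return f, g                      # or return None to abort

    x, nfev, rc = fmin_tnc(objective, [3.0, -4.0])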
"""

from scipy.optimize import moduleTNC
from .optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
                       _prepare_scalar_function)
from ._constraints import old_bound_to_new

from numpy import inf, array, zeros, asfarray

__all__ = ['fmin_tnc']


MSG_NONE = 0  # No messages
MSG_ITER = 1  # One line per iteration
MSG_INFO = 2  # Informational messages
MSG_VERS = 4  # Version info
MSG_EXIT = 8  # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT

MSGS = {
        MSG_NONE: "No messages",
        MSG_ITER: "One line per iteration",
        MSG_INFO: "Informational messages",
        MSG_VERS: "Version info",
        MSG_EXIT: "Exit reasons",
        MSG_ALL: "All messages"
}

INFEASIBLE = -1  # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0  # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3  # Max. number of function evaluations reached
LSFAIL = 4  # Linear search failed
CONSTANT = 5  # All lower bounds are equal to the upper bounds
NOPROGRESS = 6  # Unable to progress
USERABORT = 7  # User requested end of minimization

RCSTRINGS = {
        INFEASIBLE: "Infeasible (lower bound > upper bound)",
        LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
        FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
        XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
        MAXFUN: "Max. number of function evaluations reached",
        LSFAIL: "Linear search failed",
        CONSTANT: "All lower bounds are equal to the upper bounds",
        NOPROGRESS: "Unable to progress",
        USERABORT: "User requested end of minimization"
}

# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy


def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize. Must do one of:

        1. Return f and g, where f is the value of the function and g its
           gradient (a list of floats).

        2. Return the function value but supply gradient function
           separately as `fprime`.

        3. Return the function value and set ``approx_grad=True``.

        If the function returns None, the minimization
        is aborted.
    x0 : array_like
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``, optional
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple, optional
        Arguments to pass to function.
    approx_grad : bool, optional
        If true, approximate the gradient numerically.
    bounds : list, optional
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float, optional
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : array_like, optional
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : array_like, optional
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int, optional
        Bit mask used to select the messages to display during
        minimization; values are defined in the MSGS dict. Defaults to
        MSG_ALL.
    disp : int, optional
        Integer interface to messages. 0 = no message, 5 = all messages.
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluations. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float, optional
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float, optional
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float, optional
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float, optional
        Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float, optional
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float, optional
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float, optional
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code, see below.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.

    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. it wraps a C implementation of the algorithm
    2. it allows each variable to be given an upper and lower bound.

    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.
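
    A schematic of the step-clipping idea in plain Python (illustrative
    only, not the wrapped C implementation; all names are invented for
    this sketch)::

        def clip_step(x, d, low, up):
            # Largest step length in [0, 1] keeping x + alpha*d inside
            # the box [low, up], checked component by component.
            alpha = 1.0
            for xi, di, lo, hi in zip(x, d, low, up):
                if di > 0:
                    alpha = min(alpha, (hi - xi) / di)
                elif di < 0:
                    alpha = min(alpha, (lo - xi) / di)
            return alpha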

    Return codes are defined as follows::

        -1 : Infeasible (lower bound > upper bound)
         0 : Local minimum reached (|pg| ~= 0)
         1 : Converged (|f_n-f_(n-1)| ~= 0)
         2 : Converged (|x_n-x_(n-1)| ~= 0)
         3 : Max. number of function evaluations reached
         4 : Linear search failed
         5 : All lower bounds are equal to the upper bounds
         6 : Unable to progress
         7 : User requested end of minimization
235 References
236 ----------
237 Wright S., Nocedal J. (2006), 'Numerical Optimization'
239 Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
240 SIAM Journal of Numerical Analysis 21, pp. 770-778
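
    Examples
    --------
    A small bound-constrained sketch mirroring the module's own example
    (``f_and_grad`` is an illustrative name; no output is shown):

    >>> from scipy.optimize import fmin_tnc
    >>> def f_and_grad(x):
    ...     f = x[0] ** 2 + abs(x[1]) ** 3
    ...     g = [2.0 * x[0], 3.0 * x[1] * abs(x[1])]
    ...     return f, g
    >>> x, nfev, rc = fmin_tnc(f_and_grad, [-7.0, 3.0],
    ...                        bounds=[(-10, 10), (1, 10)])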
    """
    # handle fprime/approx_grad
    if approx_grad:
        fun = func
        jac = None
    elif fprime is None:
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    if disp is not None:  # disp takes precedence over messages
        mesg_num = disp
    else:
        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
    # build options
    opts = {'eps': epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxfun': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}

    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)

    return res['x'], res['nfev'], res['status']


def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, finite_diff_rel_step=None, maxfun=None,
                  **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
        Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxiter : int, optional
        Maximum number of function evaluations. This keyword is deprecated
        in favor of `maxfun`. Only if `maxfun` is None is this keyword used.
    eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    maxfun : int
        Maximum number of function evaluations. If None, `maxfun` is
        set to max(100, 10*len(x0)). Defaults to None.
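
    As an illustration, these options are typically supplied through
    ``scipy.optimize.minimize`` with ``method='TNC'`` (the quadratic
    objective below is made up for this sketch)::

        from scipy.optimize import minimize

        res = minimize(lambda x: (x[0] - 1.0) ** 2, [5.0], method='TNC',
                       bounds=[(-10, 10)],
                       options={'maxfun': 200, 'ftol': 1e-8})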
    """
    _check_unknown_options(unknown_options)
    fmin = minfev
    pgtol = gtol

    x0 = asfarray(x0).flatten()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    new_bounds = old_bound_to_new(bounds)

    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    func_and_grad = sf.fun_and_grad

    """
    low, up : the bounds (lists of floats)
              if low is None, the lower bounds are removed.
              if up is None, the upper bounds are removed.
              low and up default to None
    """
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l, u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = array([])

    if offset is None:
        offset = array([])

    if maxfun is None:
        if maxiter is not None:
            maxfun = maxiter
        else:
            maxfun = max(100, 10*len(x0))
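
    # Hand off to the compiled TNC routine; it returns the exit code, the
    # number of function evaluations, the number of iterations, and the
    # final parameter vector.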
    rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
                                        offset, messages, maxCGit, maxfun,
                                        eta, stepmx, accuracy, fmin, ftol,
                                        xtol, pgtol, rescale, callback)

    funv, jacv = func_and_grad(x)

    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
                          nit=nit, status=rc, message=RCSTRINGS[rc],
                          success=(-1 < rc < 3))


if __name__ == '__main__':
    # Examples for TNC

    def example():
        print("Example")

        # A function to minimize
        def function(x):
            f = pow(x[0], 2.0) + pow(abs(x[1]), 3.0)
            g = [0, 0]
            g[0] = 2.0*x[0]
            g[1] = 3.0*pow(abs(x[1]), 2.0)
            if x[1] < 0:
                g[1] = -g[1]
            return f, g

        # Optimizer call
        x, nf, rc = fmin_tnc(function, [-7, 3], bounds=[(-10, 10), (1, 10)])

        print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
        print("x =", x)
        print("exact value = [0, 1]")
        print()

    example()