diff --git a/README.md b/README.md index 01b9a1d1..84647076 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,37 @@ -pyOptSparse - PYthon OPTimization (Sparse) Framework -==================================================== +# pyOptSparse - PYthon OPTimization (Sparse) Framework pyOptsparse is an object-oriented framework for formulating and solving nonlinear constrained optimization problems in an efficient, reusable, and portable manner. -It evolved from pyOpt through the usage of sparse matrices throughout the code. -Many optimization techniques can be used in pyOptSparse, including SNOPT, IPOPT, SLSQP, NSGA2, and more. +It is a fork of pyOpt that uses sparse matrices throughout the code to more efficiently handle large-scale optimization problems. +Many optimization techniques can be used in pyOptSparse, including both gradient-based and gradient-free methods. A visualization tool called OptView also comes packaged with pyOptSparse, which shows the optimization history through an interactive GUI. An example output from OptView is shown below. ![Example](doc/OptView.png) -Documentation ------------- +## Optimizer Support +pyOptSparse provides Python interfaces for a number of optimizers. +ALPSO, CONMIN, IPOPT, NLPQLP, NSGA2, PSQP, ParOpt, SLSQP, and SNOPT are currently tested and supported. +FSQP, AUGLAG, and NOMAD interfaces are also provided, but they are not tested nor supported. +We do not provide the source code for SNOPT and NLPQLP, due to their restrictive license requirements. +Please contact the authors of the respective codes if you wish to obtain them. +Furthermore, ParOpt and IPOPT are available as open source packages but must be installed separately. +Please see the documentation page of each optimizer for purchase and installation instructions. + +## Documentation Please see the [documentation](http://mdolab.engin.umich.edu/docs/packages/pyoptsparse/doc/index.html) for installation details and API documentation. 
To locally build the documentation, enter the `doc` folder and enter `make html` in terminal. You can then view the built documentation in the `_build` folder. -Citation -------- +## Testing +Testing is done with the `testflo` package developed by the OpenMDAO team, which can be installed via `pip install testflo`. +To run the tests, simply type `testflo .` in the root directory. -Please cite pyOpt and the authors of the respective optimization +## Citation +A pyOptSparse journal paper does not exist; instead, please cite pyOpt and the authors of the respective optimization algorithms in any publication for which you find it useful. -For more background, theory, and figures, see the [pyOptSparse journal article](http://mdolab.engin.umich.edu/sites/default/files/pyOpt.pdf). +For more background, theory, and figures, see the [pyOpt journal article](http://mdolab.engin.umich.edu/sites/default/files/pyOpt.pdf). Perez, R. E., Jansen, P. W., and Martins, J. R. R. A., “pyOpt: A Python-Based Object-Oriented Framework for Nonlinear Constrained Optimization,” Structural and Multidisciplinary Optimization, Vol. 45, No. 1, January 2012, pp. 101–118. @@ -42,7 +51,5 @@ doi:10.1007/s00158-011-0666-3. Annote = {10.1007/s00158-011-0666-3}} ``` -License ------- - +## License Copyright 2019 MDO Lab. See the LICENSE file for details. diff --git a/doc/api/optimization.rst b/doc/api/optimization.rst index c9e3c234..9268bf8c 100644 --- a/doc/api/optimization.rst +++ b/doc/api/optimization.rst @@ -6,4 +6,4 @@ Optimziation .. currentmodule:: pyoptsparse.pyOpt_optimization .. 
autoclass:: Optimization - :members: __init__, addVar, addVarGroup, delVar, delVarSet, addCon, addConGroup, printSparsity, getDVs, setDVs, setDVsFromHistory + :members: __init__, addVar, addVarGroup, delVar, addCon, addConGroup, printSparsity, getDVs, setDVs, setDVsFromHistory diff --git a/doc/optimizers.rst b/doc/optimizers.rst index 3174a8e9..7baf8e15 100644 --- a/doc/optimizers.rst +++ b/doc/optimizers.rst @@ -13,5 +13,6 @@ Optimizers optimizers/pyfsqp optimizers/pynsga2 optimizers/pypsqp - optimizers/paropt - + optimizers/pyparopt + optimizers/pyconmin + optimizers/pyalpso diff --git a/doc/optimizers/paropt.rst b/doc/optimizers/paropt.rst deleted file mode 100644 index 2795c41d..00000000 --- a/doc/optimizers/paropt.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _paropt: - -ParOpt -====== -ParOpt is a nonlinear interior point optimizer that is designed for large parallel design optimization problems with structured sparse constraints. -ParOpt is open source and can be downloaded at `https://github.com/gjkennedy/paropt `_. -Documentation and examples for ParOpt can be found at `https://gjkennedy.github.io/paropt/ `_. - -API ---- - -.. currentmodule:: pyoptsparse.pyParOpt.ParOpt - -.. autoclass:: ParOpt - :members: __call__ diff --git a/doc/optimizers/pyalpso.rst b/doc/optimizers/pyalpso.rst new file mode 100644 index 00000000..a36e77d2 --- /dev/null +++ b/doc/optimizers/pyalpso.rst @@ -0,0 +1,13 @@ +.. _alpso: + +ALPSO +====== +Augmented Lagrangian Particle Swarm Optimizer (ALPSO) is a PSO method that uses the augmented Lagrangian approach to handle constraints. + +API +--- +.. currentmodule:: pyoptsparse.pyALPSO.pyALPSO + +.. autoclass:: ALPSO + :members: __call__ + diff --git a/doc/optimizers/pyconmin.rst b/doc/optimizers/pyconmin.rst new file mode 100644 index 00000000..32330386 --- /dev/null +++ b/doc/optimizers/pyconmin.rst @@ -0,0 +1,13 @@ +.. 
_conmin: + +CONMIN +====== +CONstrained function MINimization (CONMIN) is a gradient-based optimizer that uses the methods of feasible directions. + +API +--- +.. currentmodule:: pyoptsparse.pyCONMIN.pyCONMIN + +.. autoclass:: CONMIN + :members: __call__ + diff --git a/doc/optimizers/pyfsqp.rst b/doc/optimizers/pyfsqp.rst index 84840072..9b2049b5 100644 --- a/doc/optimizers/pyfsqp.rst +++ b/doc/optimizers/pyfsqp.rst @@ -9,6 +9,8 @@ of handling multiple competing linear and nonlinear objective functions (minimax), linear and nonlinear inequality constraints, as well as linear and nonlinear equality constraints +.. warning:: FSQP build fails, and is therefore deprecated. + API --- diff --git a/doc/optimizers/pyipopt.rst b/doc/optimizers/pyipopt.rst index b1134bc5..96531e5a 100644 --- a/doc/optimizers/pyipopt.rst +++ b/doc/optimizers/pyipopt.rst @@ -2,6 +2,15 @@ IPOPT ===== +IPOPT (Interior Point OPTimizer) is an open source interior point optimizer, designed for large-scale nonlinear optimization. +The source code can be found `here `_. +The latest version we support is 3.11.7. + +.. warning:: Currently only Python 2 is supported for IPOPT. Python 3 support + will be provided once the IPOPT support is upgraded to a more recent package. + +Installation +------------ Install instructions for ``pyIPOPT``. @@ -11,7 +20,7 @@ Install instructions for ``pyIPOPT``. #. Rename the directory from ``Ipopt-x.x.x`` to ``Ipopt`` -#. Copy ``ma27ad.f`` from the ``pyOptSparse`` bitbucket page into the ``Ipopt/ThirdParty/HSLold/`` directory +#. Obtain the MA27 linear solver from `HSL `_. Rename the source file ``ma27ad.f`` and put it in the ``Ipopt/ThirdParty/HSLold/`` directory #. Go to:: @@ -19,7 +28,7 @@ Install instructions for ``pyIPOPT``. and run:: - sh ./get.Blas + sh ./get.Blas This will download a blas copy and ``Ipopt`` will use that. @@ -29,19 +38,19 @@ Install instructions for ``pyIPOPT``. and run:: - sh ./get.Lapack + sh ./get.Lapack + +#. Run in the root directory:: -#. 
Run in the root directory - :: $ ./configure --disable-linear-solver-loader -#. Now make - :: +#. Now make:: + $ make install #. You must add the ``lib`` directory ``Ipopt`` to your - ``LD_LIBRARY_PATH`` variable for things to work right - :: + ``LD_LIBRARY_PATH`` variable for things to work right:: + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/user/hg/pyoptsparse/pyoptsparse/pyIPOPT/Ipopt/lib #. Now the pyOptSparse builder (run from the root directory) should take care of the rest. diff --git a/doc/optimizers/pynlpqlp.rst b/doc/optimizers/pynlpqlp.rst index 20f26de4..76e6a7f3 100644 --- a/doc/optimizers/pynlpqlp.rst +++ b/doc/optimizers/pynlpqlp.rst @@ -10,7 +10,10 @@ Lagrangian function and a linearization of the constraints. To generate a search direction a quadratic subproblem is formulated and solved. The line search can be performed with respect to two alternative merit functions, and the Hessian approximation is updated -by a modified BFGS formula. +by a modified BFGS formula. + +NLPQLP is proprietary software, which can be obtained `here `_. +The latest version supported is v4.2.2. API --- diff --git a/doc/optimizers/pynsga2.rst b/doc/optimizers/pynsga2.rst index 12ac306c..b156c152 100644 --- a/doc/optimizers/pynsga2.rst +++ b/doc/optimizers/pynsga2.rst @@ -9,10 +9,10 @@ optimization problems. The algorithm attempts to perform global optimization, while enforcing constraints using a tournament selection-based strategy -.. warning:: Currently, the Python wrapper currently does catch +.. warning:: Currently, the Python wrapper does not catch exceptions. If there is **any** error in the user-supplied function, you will get a seg-fault and no idea where it happened. Please make - sure the objective is without errors before trying to nsga2. + sure the objective is without errors before trying to use nsga2. 
API --- diff --git a/doc/optimizers/pyparopt.rst b/doc/optimizers/pyparopt.rst new file mode 100644 index 00000000..c6b80d00 --- /dev/null +++ b/doc/optimizers/pyparopt.rst @@ -0,0 +1,22 @@ +.. _paropt: + +ParOpt +====== +ParOpt is a nonlinear interior point optimizer that is designed for large parallel design optimization problems with structured sparse constraints. +ParOpt is open source and can be downloaded at `https://github.com/gjkennedy/paropt `_. +Documentation and examples for ParOpt can be found at `https://gjkennedy.github.io/paropt/ `_. +ParOpt does not provide version tagging, but the commit ``f692160`` from October 2019 has been verified to work. + +Installation +------------ +Please follow the instructions `here `_ to install ParOpt as a separate Python package. +Make sure that the package is named ``paropt`` and the installation location can be found by Python, so that ``from paropt import ParOpt`` works within the pyOptSparse folder. +This typically requires installing it in a location which is already present under ``$PYTHONPATH`` environment variable, or you can modify the ``.bashrc`` file and manually append the path. + +API +--- + +.. currentmodule:: pyoptsparse.pyParOpt.ParOpt + +.. autoclass:: ParOpt + :members: __call__ diff --git a/doc/optimizers/pyslsqp.rst b/doc/optimizers/pyslsqp.rst index 885ba07b..0be635c2 100644 --- a/doc/optimizers/pyslsqp.rst +++ b/doc/optimizers/pyslsqp.rst @@ -8,6 +8,8 @@ the B–matrix and an L1–test function in the step–length algorithm. The optimizer uses a slightly modified version of Lawson and Hanson’s NNLS nonlinear least-squares solver. +The version provided is the original source code from 1991 by Dieter Kraft. + API --- .. 
currentmodule:: pyoptsparse.pySLSQP.pySLSQP diff --git a/doc/optimizers/pysnopt.rst b/doc/optimizers/pysnopt.rst index b76d8983..4d0e7ebd 100644 --- a/doc/optimizers/pysnopt.rst +++ b/doc/optimizers/pysnopt.rst @@ -11,6 +11,7 @@ infeasibility in the original problem and in the quadratic programming subproblems. The Hessian of the Lagrangian is approximated using the BFGS quasi-Newton update. +We currently test against both v7.2 and v7.7. Installation ------------ diff --git a/pyoptsparse/pyNLPQLP/pyNLPQLP.py b/pyoptsparse/pyNLPQLP/pyNLPQLP.py index 1890a6da..0ef8ff8a 100644 --- a/pyoptsparse/pyNLPQLP/pyNLPQLP.py +++ b/pyoptsparse/pyNLPQLP/pyNLPQLP.py @@ -229,8 +229,8 @@ def nlgrad(m, me, mmax, n, f, g, df, dg, x, active, wa): # xs, ff, and gg have to have an extra dimension # associated with them for the NP. We will do this # correctly even though np is hard-coded to 1. - xs = numpy.array([xs]).T - f = numpy.array([ff]) + xs = numpy.array(xs).T + f = numpy.array(ff) g = numpy.zeros((mmax, np)) df = numpy.zeros(nmax) diff --git a/release_notes.md b/release_notes.md new file mode 100644 index 00000000..4f0859ae --- /dev/null +++ b/release_notes.md @@ -0,0 +1,16 @@ +# Release Notes for pyOptSparse v1.0 + +October 22, 2019 + +v1.0 is the first major release of pyOptSparse. +As such, the release notes will only highlight a few recent developments, rather than serve as an exhaustive list. + +## Bug Fixes: +- various minor fixes to code testing + +## New Features: +- ParOpt has been added as a new optimizer +- Optimal Lagrange multipliers are now saved in the solution object as `lambdastar`, for those optimizers that provide this variable + +## Changes to code behavior: +- SNOPT will no longer perform the additional function call after finishing the optimization. This was a feature of SNOPT meant for the user to perform any finalization and clean up, and write out any necessary files prior to the end of the program. 
However, this option was never provided to pySNOPT users, and therefore has been disabled. This will reduce the number of function evaluations for every optimization by one, with no impact on the final result. **However, the user can no longer rely on using `db[db['last']]` to retrieve the optimal design, since it may not be the last function evaluation.** The optimal design vector is stored under the key `xs`. \ No newline at end of file diff --git a/test/test_hs015.py b/test/test_hs015.py index 32b9c1a0..60181175 100644 --- a/test/test_hs015.py +++ b/test/test_hs015.py @@ -44,7 +44,7 @@ def sens(self, xdict, funcs): return funcsSens, fail - def optimize(self, optName, optOptions={}, storeHistory=False): + def optimize(self, optName, optOptions={}, storeHistory=False,places=5): # Optimization Object optProb = Optimization('HS15 Constraint Problem', self.objfunc) @@ -82,7 +82,7 @@ def optimize(self, optName, optOptions={}, storeHistory=False): # Check Solution fobj = sol.objectives['obj'].value diff = np.min(np.abs([fobj - 306.5, fobj - 360.379767])) - self.assertAlmostEqual(diff, 0.0, places=5) + self.assertAlmostEqual(diff, 0.0, places=places) xstar1 = (0.5, 2.0) xstar2 = (-0.79212322, -1.26242985) @@ -90,10 +90,10 @@ def optimize(self, optName, optOptions={}, storeHistory=False): x2 = sol.variables['xvars'][1].value diff = np.min(np.abs([xstar1[0] - x1, xstar2[0] - x1])) - self.assertAlmostEqual(diff, 0.0, places=5) + self.assertAlmostEqual(diff, 0.0, places=places) diff = np.min(np.abs([xstar1[1] - x2, xstar2[1] - x2])) - self.assertAlmostEqual(diff, 0.0, places=5) + self.assertAlmostEqual(diff, 0.0, places=places) def test_snopt(self): self.optimize('snopt') @@ -104,11 +104,8 @@ def test_slsqp(self): def test_nlpqlp(self): self.optimize('nlpqlp') - def test_fsqp(self): - self.optimize('fsqp') - def test_ipopt(self): - self.optimize('ipopt') + self.optimize('ipopt',places=4) def test_paropt(self): self.optimize('paropt') @@ -121,8 +118,5 @@ def 
test_conmin(self): def test_psqp(self): self.optimize('psqp') - def test_paropt(self): - self.optimize('paropt') - if __name__ == "__main__": unittest.main() diff --git a/test/test_hs071.py b/test/test_hs071.py index b7aa9186..b2e39f95 100644 --- a/test/test_hs071.py +++ b/test/test_hs071.py @@ -78,9 +78,6 @@ def test_slsqp(self): def test_nlpqlp(self): self.optimize('nlpqlp') - def test_fsqp(self): - self.optimize('fsqp') - def test_ipopt(self): self.optimize('ipopt')