diff --git a/doc/guide.rst b/doc/guide.rst
index 44551ac4..fb263c47 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -41,9 +41,9 @@ where:
 
   * ``funcs`` is the dictionary of constraints and objective value(s)
 
-  * ``fail`` is a Boolean. False for successful evaluation and True for unsuccessful
+  * ``fail`` can be a Boolean or an int. False (or 0) for successful evaluation and True (or 1) for unsuccessful. It can also be 2 when using SNOPT to request a clean termination of the run.
 
-If the Optimization problem is unconstrained, ``funcs`` will contain only the objective key(s). 
+If the Optimization problem is unconstrained, ``funcs`` will contain only the objective key(s).
 
 Design Variables
 ++++++++++++++++
@@ -64,7 +64,7 @@ non-zero initial value::
     >>> optProb.addVar('var_name',lower=-10, upper=5, value=-2)
 
 The ``lower`` or ``upper`` keywords may be specified as ``None`` to
-signify there is no bound on the variable. 
+signify there is no bound on the variable.
 
 Finally, an additional keyword argument ``scale`` can be specified
 which will perform an internal design variable scaling. The ``scale``
@@ -75,7 +75,7 @@ keyword will result in the following::
 The purpose of the scale factor is ensure that design variables of
 widely different magnitudes can be used in the same optimization. Is
 it desirable to have the magnitude of all variables within an order of
-magnitude or two of each other. 
+magnitude or two of each other.
 
 The ``addVarGroup`` call is similar to ``addVar`` except that it adds
 a group of 1 or more variables. These variables are then returned as a
@@ -83,7 +83,7 @@ numpy array within the x-dictionary. For example, to add 10 variables
 with no lower bound, and a scale factor of 0.1::
 
     >>> optProb.addVarGroup('con_group', 10, upper=2.5, scale=0.1)
-    
+
 
 Constraints
 +++++++++++
@@ -113,14 +113,14 @@ will use the scaling factor to produce the following constraint::
     con_optimizer = con_user * scale
 
 In the example above, the constraint values are divided by 10000,
-which results in a upper bound (that the optimizer sees) of 1.0. 
+which results in an upper bound (that the optimizer sees) of 1.0.
 
 Constraints may also be flagged as liner using the ``linear=True``
 keyword option. Some optimizers can perform special treatment on
 linear constraint, often ensuring that they are always satisfied
 exactly on every function call (SNOPT for example). Linear
 constraints also require the use of the ``wrt`` and ``jac`` keyword
-arguments. These are explained below. 
+arguments. These are explained below.
 
 One of the major goals of ``pyOptSparse`` is to enable the use of
 sparse constraint jacobians. (Hence the 'Sparse` in the name!).
@@ -134,7 +134,7 @@ representing the constraint jacobian, a ``dictionary of keys`` approach
 is used which allows incrementally specifying parts of the constraint
 jacobain. Consider the optimization problem given below::
 
-              varA (3)   varB (1)  varC (3)   
+              varA (3)   varB (1)  varC (3)
             +--------------------------------+
    conA (2) |          |     X    |    X     |
             ----------------------------------
    conB (3) |     X    |          |    X     |
@@ -162,7 +162,7 @@ that generates the hypothetical optimization problem is as follows::
 
 Note that the order of the ``wrt`` (which stands for with-respect-to)
 is not significant. Furthermore, if the ``wrt`` argument is omitted
-altogether, ``pyOptSparse`` assumes that the constraint is dense. 
+altogether, ``pyOptSparse`` assumes that the constraint is dense.
 Using the ``wrt`` keyword allows the user to determine the overall
 sparsity structure of the constraint jacobian.
 However, we have
@@ -187,18 +187,18 @@ have then provided this constraint jacobian using the ``jac=``
 keyword argument. This argument is a dictionary, and the keys must
 match the design variable sets given in the ``wrt`` to keyword.
 Essentially what we have done is specified the which blocks of the
 constraint rows are
-non-zero, and provided the sparsity structure of ones that are sparse. 
+non-zero, and provided the sparsity structure of ones that are sparse.
 
 For linear constraints the values in ``jac`` are meaningful: They
 must be the actual linear constraint jacobian values (which do not
 change). For non-linear constraints, on the sparsity structure
 (non-zero pattern) is significant. The values themselves will be
-determined by a call the sens() function. 
+determined by a call to the sens() function.
 
 Also note, that the ``wrt`` and ``jac`` keyword arguments are only
 supported when user-supplied sensitivity is used. If one used the
 automatic gradient in ``pyOptSparse`` the constraint jacobian will
-necessarily be dense. 
+necessarily be dense.
 
 Objectives
 ++++++++++
@@ -212,4 +212,4 @@ What this does is tell ``pyOptSparse`` that the key ``obj`` in the
 function returns will be taken as the objective. For optimizers that
 can do multi-objective optimization, (NSGA2 for example) multiple
 objectives can be added. Optimizers that can only handle one objective
-enforce that only a single objective is added to the optimization description. 
+enforce that only a single objective is added to the optimization description.
diff --git a/pyoptsparse/pyOpt_optimizer.py b/pyoptsparse/pyOpt_optimizer.py
index 0fafd220..3aac9f40 100644
--- a/pyoptsparse/pyOpt_optimizer.py
+++ b/pyoptsparse/pyOpt_optimizer.py
@@ -316,7 +316,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
         xScaled = self.optProb.invXScale * x + self.optProb.xOffset
         xuser = self.optProb.processX(xScaled)
 
-        masterFail = False
+        masterFail = 0
 
         # Set basic parameters in history
         hist = {'xuser': xuser}
@@ -337,7 +337,8 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                                 "return \"funcs\" *OR* \"funcs, fail\"")
             else:
                 funcs = args
-                fail = False
+                fail = 0
+
             self.userObjTime += time.time()-timeA
             if self.optProb.bulk is None:
                 self.userObjCalls += 1
@@ -359,7 +360,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
             self.cache['fcon'] = copy.deepcopy(fcon)
 
             # Update fail flag
-            masterFail = masterFail or fail
+            masterFail = max(masterFail, fail)
 
             # fobj is now in cache
             returns.append(self.cache['fobj'])
@@ -379,7 +380,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                                 "return \"funcs\" *OR* \"funcs, fail\"")
             else:
                 funcs = args
-                fail = False
+                fail = 0
 
             self.userObjTime += time.time()-timeA
             self.userObjCalls += 1
@@ -399,7 +400,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
             self.cache['fcon'] = copy.deepcopy(fcon)
 
             # Update fail flag
-            masterFail = masterFail or fail
+            masterFail = max(masterFail, fail)
 
             # fcon is now in cache
             returns.append(self.cache['fcon'])
@@ -430,7 +431,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                                 "return \"funcsSens\" *OR* \"funcsSens, fail\"")
             else:
                 funcsSens = args
-                fail = False
+                fail = 0
 
             self.userSensTime += time.time()-timeA
             self.userSensCalls += 1
@@ -450,7 +451,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
             self.cache['gcon'] = gcon.copy()
 
             # Update fail flag
-            masterFail = masterFail or fail
+            masterFail = max(masterFail, fail)
 
             # gobj is now in the cache
             returns.append(self.cache['gobj'])
@@ -482,7 +483,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                                 "return \"funcsSens\" *OR* \"funcsSens, fail\"")
             else:
                 funcsSens = args
-                fail = False
+                fail = 0
 
             self.userSensTime += time.time()-timeA
             self.userSensCalls += 1
@@ -501,7 +502,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
             self.cache['gcon'] = gcon.copy()
 
             # Update fail flag
-            masterFail = masterFail or fail
+            masterFail = max(masterFail, fail)
 
             # gcon is now in the cache
             returns.append(self.cache['gcon'])
diff --git a/pyoptsparse/pySNOPT/pySNOPT.py b/pyoptsparse/pySNOPT/pySNOPT.py
index a34f814a..b3f8ae7f 100644
--- a/pyoptsparse/pySNOPT/pySNOPT.py
+++ b/pyoptsparse/pySNOPT/pySNOPT.py
@@ -466,7 +466,7 @@ def __call__(self, optProb, sens=None, sensStep=None, sensMode=None,
         # Setup argument list values
         start = numpy.array(self.options['Start'][1])
         ObjAdd = numpy.array([0.], numpy.float)
-        ProbNm = numpy.array(self.optProb.name,'c')        
+        ProbNm = numpy.array(self.optProb.name,'c')
         cdummy = -1111111 # this is a magic variable defined in SNOPT for undefined strings
         cw[51,:] = cdummy # we set these to cdummy so that a placeholder is used in printout
         cw[52,:] = cdummy
@@ -557,21 +557,23 @@ def _userfg_wrap(self, mode, nnJac, x, fobj, gobj, fcon, gcon,
         if nState >= 2:
             return
 
-        fail = False
+        fail = 0
         self.iu0 = iu[0]
         if mode == 0 or mode == 2:
             fobj, fcon, fail = self._masterFunc(x, ['fobj', 'fcon'])
-        if not fail:
+        if fail == 0:
             if mode == 1:
                 if self.getOption('Derivative level') != 0:
                     gobj, gcon, fail = self._masterFunc(x, ['gobj', 'gcon'])
             if mode == 2:
                 if self.getOption('Derivative level') != 0:
                     gobj, gcon, fail2 = self._masterFunc(x, ['gobj', 'gcon'])
-                    fail = fail or fail2
+                    fail = max(fail, fail2)
 
-        if fail:
+        if fail == 1:
             mode = -1
+        elif fail == 2:
+            mode = -2
 
         # Flush the files to the buffer for all the people who like to
         # monitor the residual
diff --git a/test/test_hs015.py b/test/test_hs015.py
index df6e5a70..ad5a02cf 100644
--- a/test/test_hs015.py
+++ b/test/test_hs015.py
@@ -78,7 +78,7 @@ def optimize(self, optName, optOptions={}, storeHistory=False,places=5):
             histFileName = None
 
         sol = opt(optProb, sens=self.sens, storeHistory=histFileName)
-        
+
         # Test printing solution to screen
         print(sol)
 
@@ -92,6 +92,10 @@ def optimize(self, optName, optOptions={}, storeHistory=False,places=5):
         x1 = sol.variables['xvars'][0].value
         x2 = sol.variables['xvars'][1].value
 
+        dv = sol.getDVs()
+        self.assertAlmostEqual(x1, dv['xvars'][0], places=10)
+        self.assertAlmostEqual(x2, dv['xvars'][1], places=10)
+
         diff = np.min(np.abs([xstar1[0] - x1, xstar2[0] - x1]))
         self.assertAlmostEqual(diff, 0.0, places=places)
 
@@ -109,7 +113,7 @@ def test_nlpqlp(self):
 
     def test_ipopt(self):
         self.optimize('ipopt',places=4)
-        
+
     def test_paropt(self):
         self.optimize('paropt')
 
diff --git a/test/test_snopt_user_termination.py b/test/test_snopt_user_termination.py
new file mode 100644
index 00000000..e0f9ea4d
--- /dev/null
+++ b/test/test_snopt_user_termination.py
@@ -0,0 +1,114 @@
+"""
+Tests that pyoptsparse allows an objective or gradient function to return a "2" as its fail status.
+This status is passed to pySNOPT, which returns a -2 fail status, indicating that termination has
+been requested by the user.
+""" +from __future__ import print_function +import unittest + +import numpy as np + +from pyoptsparse import Optimization, SNOPT + + +class TerminateComp(object): + + def __init__(self, max_obj=1000, max_sens=1000): + self.max_obj = max_obj + self.max_sens = max_sens + self.obj_count = 0 + self.sens_count = 0 + + def objfunc(self, xdict): + """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """ + x = xdict['x'] + y = xdict['y'] + funcs = {} + + funcs['obj'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0 + conval = -x + y + funcs['con'] = conval + + if self.obj_count > self.max_obj: + fail = 2 + else: + self.obj_count += 1 + fail = False + + return funcs, fail + + def sens(self, xdict, funcs): + """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 + """ + x = xdict['x'] + y = xdict['y'] + funcsSens = {} + + funcsSens['obj', 'x'] = 2.0*x - 6.0 + y + funcsSens['obj', 'y'] = 2.0*y + 8.0 + x + + if self.sens_count > self.max_sens: + fail = 2 + else: + self.sens_count += 1 + fail = False + + return funcsSens, fail + + +con_jac = {} +con_jac['x'] = np.array(-1.0) +con_jac['y'] = np.array(1.0) + +class TestUserTerminationStatus(unittest.TestCase): + + def test_obj(self): + termcomp = TerminateComp(max_obj=2) + optProb = Optimization('Paraboloid', termcomp.objfunc) + + optProb.addVarGroup('x', 1, type='c', lower=-50.0, upper=50.0, value=0.0) + optProb.addVarGroup('y', 1, type='c', lower=-50.0, upper=50.0, value=0.0) + optProb.finalizeDesignVariables() + + optProb.addObj('obj') + + optProb.addConGroup('con', 1, lower=-15.0, upper=-15.0, wrt=['x', 'y'], linear=True, jac=con_jac) + + try: + opt = SNOPT() + except: + raise unittest.SkipTest('Optimizer not available: SNOPT') + + sol = opt(optProb, sens=termcomp.sens) + + self.assertEqual(termcomp.obj_count, 3) + + # Exit code for user requested termination. + self.assertEqual(sol.optInform['value'][0], 71) + + def test_sens(self): + termcomp = TerminateComp(max_sens=3) + optProb = Optimization('Paraboloid', termcomp.objfunc) + + optProb.addVarGroup('x', 1, type='c', lower=-50.0, upper=50.0, value=0.0) + optProb.addVarGroup('y', 1, type='c', lower=-50.0, upper=50.0, value=0.0) + optProb.finalizeDesignVariables() + + optProb.addObj('obj') + + optProb.addConGroup('con', 1, lower=-15.0, upper=-15.0, wrt=['x', 'y'], linear=True, jac=con_jac) + + try: + opt = SNOPT() + except: + raise unittest.SkipTest('Optimizer not available: SNOPT') + + sol = opt(optProb, sens=termcomp.sens) + + self.assertEqual(termcomp.sens_count, 4) + + # Exit code for user requested termination. + self.assertEqual(sol.optInform['value'][0], 71) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file