Tags: python, nlopt

NLopt does not catch ForcedStop in the constraint function passed to add_inequality_constraint


I am rewriting [LikelihoodProfiler](https://github.com/insysbio/LikelihoodProfiler.jl) from Julia to Python. I need to write a constraint function for the [nonlinear constraints](https://nlopt.readthedocs.io/en/latest/NLopt_Python_Reference/#nonlinear-constraints) API such that, when certain values are reached, it throws a [force-stop exception](https://nlopt.readthedocs.io/en/latest/NLopt_Python_Reference/#exc). NLopt should then handle the exception and return a result with a special code.

In [Julia it looks like this](https://github.com/insysbio/LikelihoodProfiler.jl/blob/master/src/cico_one_pass.jl):

    function constraints_func(x, g)
        loss = loss_func(x)
        if (loss < 0.) && (scan_func(x) > scan_bound)
            throw(ForcedStop("Out of the scan bound but in ll constraint."))
        #elseif isapprox(loss, 0., atol=loss_tol)
        #    @warn "loss_tol reached... but..."
        #    return loss
        else
            return loss
        end
    end

    opt = Opt(:LN_AUGLAG, n_theta)
    ftol_abs!(opt, scan_tol)
    max_objective!(
        opt,
        (x, g) -> scan_func(x)
    )
    lb = [theta_bounds[i][1] for i in 1:n_theta] # minimum.(theta_bounds)
    ub = [theta_bounds[i][2] for i in 1:n_theta] # maximum.(theta_bounds)
    lower_bounds!(opt, lb)
    upper_bounds!(opt, ub)
    local_optimizer!(opt, local_opt)
    maxeval!(opt, max_iter)

    # inequality constraints
    inequality_constraint!(
        opt,
        constraints_func,
        loss_tol
    )

    # start optimization
    (optf, optx, ret) = optimize(opt, theta_init)

I tried to rewrite it in Python like this:

    # Constraints function
    def constraints_func(x, g, opt):
        loss = loss_func(x)
        if (loss < 0) and (scan_func(x) > scan_bound):
            opt.force_stop()
            # raise nlopt.ForcedStop("Out of the scan bound but in ll constraint.")
        else:
            return loss

    # constrained optimizer
    opt = nlopt.opt(nlopt.LN_AUGLAG, n_theta)
    opt.set_ftol_abs(scan_tol)
    opt.set_max_objective(lambda x, g: scan_func(x))

    lb = [theta_bounds[i][0] for i in range(n_theta)]  # minimum.(theta_bounds)
    ub = [theta_bounds[i][1] for i in range(n_theta)]  # maximum.(theta_bounds)
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    opt.set_local_optimizer(local_opt)
    opt.set_maxeval(max_iter)

    # inequality constraints
    opt.add_inequality_constraint(lambda x, g: constraints_func(x, g, opt), loss_tol)

    # start optimization
    optx = opt.optimize(theta_init)
    optf = opt.last_optimum_value()
    ret = opt.last_optimize_result()

But when I run it, I get an "nlopt invalid argument" error. If instead of

    opt.force_stop()

I use

    raise nlopt.ForcedStop("Out of the scan bound but in ll constraint.")

I get

    nlopt.ForcedStop: Out of the scan bound but in ll constraint

But I expected NLopt to handle the exception and return the optimization result with a special code.
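
Here is a minimal sketch that reproduces the behaviour outside of my code (a toy one-dimensional objective and constraint; all of the names are made up): raising nlopt.ForcedStop inside the constraint callback propagates out of opt.optimize() instead of turning into a return code.

    import numpy as np
    import nlopt

    def toy_objective(x, grad):
        # smooth 1-D objective; grad is ignored by derivative-free algorithms
        return -float((x[0] - 0.5) ** 2)

    def toy_constraint(x, grad):
        # pretend that anything above 0.8 is "out of the scan bound"
        if x[0] > 0.8:
            raise nlopt.ForcedStop("out of bound in toy constraint")
        return float(x[0] - 1.0)  # <= 0 everywhere else, so the constraint holds

    opt = nlopt.opt(nlopt.LN_COBYLA, 1)
    opt.set_max_objective(toy_objective)
    opt.add_inequality_constraint(toy_constraint, 1e-8)
    opt.set_lower_bounds([0.0])
    opt.set_upper_bounds([1.0])
    opt.set_maxeval(100)

    try:
        opt.optimize(np.array([0.9]))  # start where the constraint raises
    except nlopt.ForcedStop as err:
        # the exception reaches the caller instead of a FORCED_STOP return code
        print("caught ForcedStop:", err)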


Solution

  • Unfortunately, I could not solve this at the NLopt level, so I use the standard Python approach of catching the exception myself:

    def constraints_func(x, g):
        loss = loss_func(x)
        if (loss < 0) and (scan_func(x) > scan_bound):
            # return opt.force_stop()
            raise nlopt.ForcedStop("Out of the scan bound but in ll constraint.")
        else:
            return loss

    try:
        optx = opt.optimize(theta_init)
        optf = opt.last_optimum_value()
        ret = opt.last_optimize_result()
    except nlopt.ForcedStop:
        ret = -5
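
A possible refinement of the same workaround, assuming the nlopt module exposes the result-code constants (e.g. nlopt.FORCED_STOP, which should carry the same numeric value -5): avoid the hard-coded code and let the caller test the outcome explicitly.

    try:
        optx = opt.optimize(theta_init)
        optf = opt.last_optimum_value()
        ret = opt.last_optimize_result()
    except nlopt.ForcedStop:
        # optimization was halted from inside constraints_func
        optx, optf = None, None
        ret = nlopt.FORCED_STOP  # same special code as the hand-written -5

    if ret == nlopt.FORCED_STOP:
        print("scan stopped: out of the scan bound but inside the ll constraint")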