Tags: python, ibm-cloud, cplex, watson-studio, docplex

IBM Watson CPLEX Shows no Variables, no Solution when solving LP file


I'm migrating an application that formerly ran on IBM's DoCloud to their new Watson-based API. Since our application's data isn't in CSV format and we don't separate the model and data layers, it seemed simpler to upload an LP file along with a model script that reads the LP file and solves it. The upload works and the job claims to solve correctly, but it returns an empty solve status. I've also printed various pieces of model information (e.g. the number of variables) and everything is zeroed out. I've confirmed the LP file isn't blank - it contains a trivial MILP.

Here is my model code (most of which is taken directly from the example at https://dataplatform.cloud.ibm.com/exchange/public/entry/view/50fa9246181026cd7ae2a5bc7e4ac7bd):

import os
import sys
from os.path import splitext

import pandas
from docplex.mp.model_reader import ModelReader
from docplex.util.environment import get_environment
from six import iteritems


def loadModelFiles():
    """Load the input CSVs and extract the model and param data from it
    """
    env = get_environment()
    inputModel = params = None
    modelReader = ModelReader()

    for inputName in [f for f in os.listdir('.') if splitext(f)[1] != '.py']:
        inputBaseName, ext = splitext(inputName)

        print(f'Info: loading {inputName}')

        try:
            if inputBaseName == 'model':
                inputModel = modelReader.read_model(inputName, model_name=inputBaseName)
            elif inputBaseName == 'params':
                params = modelReader.read_prm(inputName)
        except Exception as e:
            with env.get_input_stream(inputName) as inStream:
                inData = inStream.read()
            raise Exception(f'Error: {e} found while processing {inputName} with contents {inData}')

    if inputModel is None or params is None:
        print('Warning: error loading model or params, see earlier messages for details')

    return inputModel, params


def writeOutputs(outputs):
    """Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    """
    for (name, df) in iteritems(outputs):
        csv_file = f'{name}.csv'
        print(csv_file)
        with get_environment().get_output_stream(csv_file) as fp:
            if sys.version_info[0] < 3:
                fp.write(df.to_csv(index=False, encoding='utf8'))
            else:
                fp.write(df.to_csv(index=False).encode(encoding='utf8'))
    if len(outputs) == 0:
        print("Warning: no outputs written")


# load and solve model
model, modelParams = loadModelFiles()
ok = model.solve(cplex_parameters=modelParams)
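# NB: docplex's Model.solve() returns a solution object on success and None on
# failure, so model.solution below will be None if the solve failed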

solution_df = pandas.DataFrame(columns=['name', 'value'])

for index, dvar in enumerate(model.solution.iter_variables()):
    solution_df.loc[index, 'name'] = dvar.to_string()
    solution_df.loc[index, 'value'] = dvar.solution_value

outputs = {}
outputs['solution'] = solution_df

# Generate output files
writeOutputs(outputs)

try:
    with get_environment().get_output_stream('test.txt') as fp:
        fp.write(f'{model.get_statistics()}'.encode('utf-8'))

except Exception as e:
    with get_environment().get_output_stream('excInfo') as fp:
        fp.write(f'Got exception {e}'.encode('utf-8'))

And here is a stub of the code that runs it (again, pulling heavily from the example):

import os
import tarfile
import time
from tempfile import NamedTemporaryFile

prmFile = NamedTemporaryFile()
prmFile.write(self.ctx.cplex_parameters.export_prm_to_string().encode())
modelFile = NamedTemporaryFile()
modelFile.write(self.solver.export_as_lp_string(hide_user_names=True).encode())
modelMetadata = {
    self.client.repository.ModelMetaNames.NAME: self.name,
    self.client.repository.ModelMetaNames.TYPE: 'do-docplex_12.9',
    self.client.repository.ModelMetaNames.RUNTIME_UID: 'do_12.9'
}
baseDir = os.path.dirname(os.path.realpath(__file__))

def reset(tarinfo):
    tarinfo.uid = tarinfo.gid = 0
    tarinfo.uname = tarinfo.gname = 'root'
    return tarinfo

with NamedTemporaryFile() as tmp:
    tar = tarfile.open(tmp.name, 'w:gz')
    tar.add(f'{baseDir}/ibm_model.py', arcname='main.py', filter=reset)
    tar.add(prmFile.name, arcname='params.prm', filter=reset)
    tar.add(modelFile.name, arcname='model.lp', filter=reset)
    tar.close()

    modelDetails = self.client.repository.store_model(
        model=tmp.name,
        meta_props=modelMetadata
    )

    modelUid = self.client.repository.get_model_uid(modelDetails)

metaProps = {
    self.client.deployments.ConfigurationMetaNames.NAME: self.name,
    self.client.deployments.ConfigurationMetaNames.BATCH: {},
    self.client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': 1}
}
deployDetails = self.client.deployments.create(modelUid, meta_props=metaProps)
deployUid = self.client.deployments.get_uid(deployDetails)

solvePayload = {
    # we upload input data as part of model since only CSV data is supported in this interface
    self.client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [],
    self.client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [
        {
            "id": ".*"
        }
    ]
}

jobDetails = self.client.deployments.create_job(deployUid, solvePayload)
jobUid = self.client.deployments.get_job_uid(jobDetails)

while jobDetails['entity']['decision_optimization']['status']['state'] not in ['completed', 'failed', 'canceled']:
    logger.debug(jobDetails['entity']['decision_optimization']['status']['state'] + '...')
    time.sleep(5)
    jobDetails = self.client.deployments.get_job_details(jobUid)

logger.debug(jobDetails['entity']['decision_optimization']['status']['state'])

# cleanup
self.client.repository.delete(modelUid)
prmFile.close()
modelFile.close()

Any ideas as to what could be causing this, or what a good avenue for testing would be? It seems there's no way to view the output of the model for debugging; am I missing something in Watson Studio?
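One local check worth running first - a minimal sketch that reuses the same docplex calls as the model script above, assuming a local CPLEX installation and that the exported file is saved as model.lp - is to read the LP back with ModelReader and confirm it actually contains variables before uploading:

from docplex.mp.model_reader import ModelReader

# Re-read the exported LP locally; if this already reports 0 variables,
# the problem is in the export step rather than in the Watson deployment
m = ModelReader().read_model('model.lp', model_name='model')
print(m.get_statistics())

if m.solve():
    m.print_solution()
else:
    print(m.solve_details.status)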


Solution

  • Thanks to Alain for verifying the overall approach, but the main issue turned out to be a simple bug in my code:

    After calling modelFile.write(...), it's necessary to call modelFile.seek(0) to reset the file pointer - otherwise an empty file gets written to the tar archive.
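
    Concretely, the corrected portion of the stub looks like this (applying the same fix to both temp files; seek(0) also flushes Python's write buffer to disk, which matters because tar.add() reopens the file by name rather than using the open handle):

    prmFile = NamedTemporaryFile()
    prmFile.write(self.ctx.cplex_parameters.export_prm_to_string().encode())
    prmFile.seek(0)  # reset the pointer / flush to disk before tar.add() reads the file

    modelFile = NamedTemporaryFile()
    modelFile.write(self.solver.export_as_lp_string(hide_user_names=True).encode())
    modelFile.seek(0)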