I'm trying to migrate a utility (currently broken because of breaking changes) that solves a .mps file using IBM's Watson Machine Learning APIs.
The original code uses an empty model.tar.gz file, creates a deployment, and passes the .mps file to a new job.
The (Python) code looks like this:
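# Note: `client` is assumed to be an already-authenticated Watson Machine Learning
# client, and COS_ENDPOINT, COS_BUCKET and cos_credentials (the Cloud Object Storage
# endpoint, bucket name and HMAC keys) are assumed to be defined earlier in the utility.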
import tarfile
tar = tarfile.open("model.tar.gz", "w:gz")
tar.close()
test_metadata = {
    client.repository.ModelMetaNames.NAME: "Test",
    client.repository.ModelMetaNames.DESCRIPTION: "Model for Test",
    client.repository.ModelMetaNames.TYPE: "do-cplex_12.9",
    client.repository.ModelMetaNames.RUNTIME_UID: "do_12.9"
}
model_details = client.repository.store_model(model='model.tar.gz', meta_props=test_metadata)
model_uid = client.repository.get_model_uid(model_details)
n_nodes = 1
meta_props = {
    client.deployments.ConfigurationMetaNames.NAME: "Test Deployment " + str(n_nodes),
    client.deployments.ConfigurationMetaNames.DESCRIPTION: "Test Deployment",
    client.deployments.ConfigurationMetaNames.BATCH: {},
    client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': n_nodes}
}
deployment_details = client.deployments.create(model_uid, meta_props=meta_props)
deployment_uid = client.deployments.get_uid(deployment_details)
solve_payload = {
    client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
        'oaas.logAttachmentName': 'log.txt',
        'oaas.logTailEnabled': 'true',
        'oaas.resultsFormat': 'JSON'
    },
    client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
        {
            'id': 'test.mps',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
                'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
            },
            'location': {
                'bucket': COS_BUCKET,
                'path': 'test.mps'
            }
        }
    ],
    client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
        {
            'id': 'solution.json',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
                'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
            },
            'location': {
                'bucket': COS_BUCKET,
                'path': 'solution.json'
            }
        },
        {
            'id': 'log.txt',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
                'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
            },
            'location': {
                'bucket': COS_BUCKET,
                'path': 'log.txt'
            }
        }
    ]
}
job_details = client.deployments.create_job(deployment_uid, solve_payload)
The closest I've managed to get (which is almost exactly what I need) is to use most of the code from this example:
https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/deployments/decision_optimization/Use%20Decision%20Optimization%20to%20plan%20your%20diet.ipynb
Here is a full working sample.
from ibm_watson_machine_learning import APIClient
import os
import wget
import json
import pandas as pd
import time
COS_ENDPOINT = "https://s3.ams03.cloud-object-storage.appdomain.cloud"
model_path = 'do-model.tar.gz'
api_key = 'XXXXX'
access_key_id = "XXXX",
secret_access_key= "XXXX"
location = 'eu-gb'
space_id = 'XXXX'
softwareSpecificationName = "do_12.9"
modelType = "do-docplex_12.9"
wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
client = APIClient(wml_credentials)
client.set.default_space(space_id)
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/decision_optimization/do-model.tar.gz")
software_spec_uid = client.software_specifications.get_uid_by_name(softwareSpecificationName)
model_meta_props = {
    client.repository.ModelMetaNames.NAME: "LOCALLY created DO model",
    client.repository.ModelMetaNames.TYPE: modelType,
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
published_model = client.repository.store_model(model=model_path, meta_props=model_meta_props)
time.sleep(5)  # So that the model is available on the API
published_model_uid = client.repository.get_model_uid(published_model)
client.repository.list_models()
meta_data = {
    client.deployments.ConfigurationMetaNames.NAME: "deployment_DO",
    client.deployments.ConfigurationMetaNames.BATCH: {},
    client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S", "num_nodes": 1}
}
deployment_details = client.deployments.create(published_model_uid, meta_props=meta_data)
time.sleep(5)  # So that the deployment is available on the API
deployment_uid = client.deployments.get_uid(deployment_details)
client.deployments.list()
job_payload_ref = {
    client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
        {
            'id': 'diet_food.csv',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': access_key_id,
                'secret_access_key': secret_access_key
            },
            'location': {
                'bucket': "gvbucketname0api",
                'path': "diet_food.csv"
            }
        },
        {
            'id': 'diet_food_nutrients.csv',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': access_key_id,
                'secret_access_key': secret_access_key
            },
            'location': {
                'bucket': "gvbucketname0api",
                'path': "diet_food_nutrients.csv"
            }
        },
        {
            'id': 'diet_nutrients.csv',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': access_key_id,
                'secret_access_key': secret_access_key
            },
            'location': {
                'bucket': "gvbucketname0api",
                'path': "diet_nutrients.csv"
            }
        }
    ],
    client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
        {
            'id': '.*',
            'type': 's3',
            'connection': {
                'endpoint_url': COS_ENDPOINT,
                'access_key_id': access_key_id,
                'secret_access_key': secret_access_key
            },
            'location': {
                'bucket': "gvbucketname0api",
                'path': "${job_id}/${attachment_name}"
            }
        }
    ]
}
job = client.deployments.create_job(deployment_uid, meta_props=job_payload_ref)
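To see when the job finishes, I poll the job details until the Decision Optimization status reaches a terminal state (this loop follows the pattern from the diet notebook linked above, so the field names are the ones that sample uses):
from time import sleep

job_uid = client.deployments.get_job_uid(job)
job_details = client.deployments.get_job_details(job_uid)
# keep polling until the solve is done
while job_details['entity']['decision_optimization']['status']['state'] not in ['completed', 'failed', 'canceled']:
    print(job_details['entity']['decision_optimization']['status']['state'] + '...')
    sleep(5)
    job_details = client.deployments.get_job_details(job_uid)
print(job_details['entity']['decision_optimization']['status']['state'])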
The above example uses a model and a few CSV files as input. When I change the INPUT_DATA_REFERENCES to use a .mps file (and an empty model), I get an error:
"errors": [
{
"code": "invalid_model_archive_in_deployment",
"message": "Invalid or unrecognized archive type in deployment `XXX-XXX-XXX`.
Supported archive types are `zip` or `tar.gz`"
}
I'm not an expert, but from what I understand the .mps file contains both the model and the input data, so I shouldn't have to provide both.
The answer was provided by Alex Fleischer on another forum.
A full example can be found here:
https://medium.com/@AlainChabrier/solve-lp-problems-from-do-experiments-9afd4d53aaf5
The above link (which is similar to the code in my question) shows an example with a ".lp" file, but it's exactly the same for a ".mps" file too.
(Note that the model type is do-cplex_12.10, not do-docplex_12.10.)
My problem was that I was using an empty model.tar.gz file.
Once you have the .lp/.mps file in the archive, everything works as expected.
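For example, a minimal sketch of the fix (the file and model names here are only illustrative, and it assumes the do_12.10 software specification that matches the do-cplex_12.10 model type):
import tarfile

# package the actual CPLEX model file instead of shipping an empty archive
with tarfile.open("model.tar.gz", "w:gz") as tar:
    tar.add("test.mps", arcname="test.mps")

mps_model_meta = {
    client.repository.ModelMetaNames.NAME: "MPS model",
    client.repository.ModelMetaNames.TYPE: "do-cplex_12.10",  # do-cplex, not do-docplex
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: client.software_specifications.get_uid_by_name("do_12.10")
}
model_details = client.repository.store_model(model="model.tar.gz", meta_props=mps_model_meta)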