tensorflow, tensorflow-lite, tensorflow-hub, movenet

How can I save a model with input shape (1, None, None, 3) with the None dimensions fixed to 256?


I have questions about a model obtained from TensorFlow Hub.

I am trying to use "movenet", downloaded from the page below, with the source code that follows.

https://www.kaggle.com/models/google/movenet/tensorFlow2

(1) The input shape of this model is (1, None, None, 3). How can I fix the None parts? I want to pin the input shape to (1, 256, 256, 3) so that I can create a fixed-shape TensorFlow Lite model.

(2) Also, I can't display the summary of this model. What should I do?

(3) I want to check the input shape and output shape after tf.saved_model.load, but when I access loaded.signatures['serving_default'] on the reloaded model, a KeyError occurs. How can I check the input and output shapes after loading?

    import tensorflow as tf
    import tensorflow_hub as tfhub

    input_size = 256

    model_url = "https://www.kaggle.com/models/google/movenet/TensorFlow2/multipose-lightning/1"
    model = tfhub.load(model_url)
    movenet = model.signatures['serving_default']
    print(movenet.structured_input_signature)
    print(movenet.structured_outputs)
    tf.saved_model.save(model, './saved_model')

    model.summary() # -->> ERROR

    loaded = tf.saved_model.load('./saved_model')
    print(list(loaded.signatures.keys())) # -->> [] # empty!
    movenet = loaded.signatures['serving_default'] # -->> ERROR
    print(movenet.structured_input_signature)
    print(movenet.structured_outputs)
Output of the two print() calls at the top (before saving):

((), {'input': TensorSpec(shape=(1, None, None, 3), dtype=tf.int32, name='input')})
{'output_0': TensorSpec(shape=(1, 6, 56), dtype=tf.float32, name='output_0')}
### model.summary() # -->> ERROR
  File "/home/debian/sandbox/movenet/python/movenet_multi.py", line 356, in <module>
    main()
  File "/home/debian/sandbox/movenet/python/movenet_multi.py", line 126, in main
    model.summary() # -->> ERROR
AttributeError: '_UserObject' object has no attribute 'summary'
### movenet = loaded.signatures['serving_default'] # -->> ERROR
  File "/home/debian/sandbox/movenet/python/movenet_multi.py", line 355, in <module>
    main()
  File "/home/debian/sandbox/movenet/python/movenet_multi.py", line 127, in main
    movenet = loaded.signatures['serving_default'] # -->> ERROR
KeyError: 'serving_default'

Solution

  • I solved it myself.

    (1) I was able to fix the input shape by converting the model with the flow below.

    (2) The loaded object is not a Keras model, so its internals cannot be inspected with summary(). I worked around this by exporting the model to TensorFlow Lite format and visualizing the graph with Netron.

    (3) As with issue (2), the input and output shapes can be checked with Netron (or programmatically, as shown below).
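
    For the record, once a .tflite file exists (converted_model.tflite is produced in the first step below), the input and output shapes can also be checked with tf.lite.Interpreter instead of Netron; a minimal sketch:

    import tensorflow as tf

    # Load the converted model and print its tensor shapes
    interpreter = tf.lite.Interpreter(model_path="converted_model.tflite")
    interpreter.allocate_tensors()
    print(interpreter.get_input_details()[0]["shape"])   # e.g. [1 256 256 3]
    print(interpreter.get_output_details()[0]["shape"])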

    First, download the model from TensorFlow Hub and save it in Keras and TensorFlow Lite formats.

    ### Save as tflite format
    # -->> for movenet
    import tensorflow as tf
    import tensorflow_hub as hub

    # Wrap the hub model in a Keras layer so that the input shape can be
    # fixed with build() and summary() becomes available
    model = tf.keras.Sequential(
        [
            hub.KerasLayer(
                "https://www.kaggle.com/models/google/movenet/TensorFlow2/multipose-lightning/1",
                trainable=False,
                signature="serving_default",
                signature_outputs_as_dict=True,
            ),
        ]
    )
    model.build([1, 256, 256, 3])  # pin (batch, height, width, channels)
    model.summary()
    model.save("movenet-multipose-lightning")

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    with open("converted_model.tflite", "wb") as f:
        f.write(tflite_model)
    

    Then convert the TensorFlow Lite model to ONNX, fix the input shape, and convert back to TensorFlow Lite.

    ### Convert to ONNX format
    $ python -m tf2onnx.convert --tflite converted_model.tflite --output converted_model_nchw_output.onnx --inputs-as-nchw serving_default_keras_layer_input:0
    
    ### Fixing input shape
    $ python -m onnxruntime.tools.make_dynamic_shape_fixed --input_name serving_default_keras_layer_input:0 --input_shape 1,3,256,256 converted_model_nchw_output.onnx converted_model_nchw_output_fixed.onnx
    
    ### Remove suffix
    $ python script.py converted_model_nchw_output_fixed.onnx converted_model_nchw_output_mod.onnx
    
    ### Converting back to TensorFlow format
    $ onnx2tf -i converted_model_nchw_output_mod.onnx -osd
    
    ### Convert back to TensorFlow Lite format
    $ python tf2tflite_no_optim.py -m saved_model -o movenet_multi_nchw_output_fixed.tflite
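
    To confirm that the make_dynamic_shape_fixed step actually produced static dimensions, the graph inputs can be printed with the onnx package (already pinned in requirements.txt); a small sketch:

    import onnx

    # Print each graph input's name and its (now hopefully static) shape
    m = onnx.load("converted_model_nchw_output_fixed.onnx")
    for inp in m.graph.input:
        dims = [d.dim_value if d.HasField("dim_value") else d.dim_param
                for d in inp.type.tensor_type.shape.dim]
        print(inp.name, dims)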
    

    script.py

    import onnx
    import sys

    def remove_suffix_from_names(model_path, output_model_path, suffix=':0'):
        # Strip the ':0' tensor suffix (str.removesuffix requires Python 3.9+)
        # from the graph inputs and outputs so downstream tools see clean names.
        onnx_model = onnx.load(model_path)
        graph_input_names = [inp.name for inp in onnx_model.graph.input]
        graph_output_names = [out.name for out in onnx_model.graph.output]
        print('graph_input_names =', graph_input_names)
        print('graph_output_names =', graph_output_names)

        for inp in onnx_model.graph.input:
            inp.name = inp.name.removesuffix(suffix)

        for out in onnx_model.graph.output:
            out.name = out.name.removesuffix(suffix)

        # Update node references that point at the renamed inputs/outputs
        for node in onnx_model.graph.node:
            for i in range(len(node.input)):
                if node.input[i] in graph_input_names:
                    node.input[i] = node.input[i].removesuffix(suffix)

            for i in range(len(node.output)):
                if node.output[i] in graph_output_names:
                    node.output[i] = node.output[i].removesuffix(suffix)

        onnx.save(onnx_model, output_model_path)

    if __name__ == "__main__":
        if len(sys.argv) != 3:
            print("Usage: python3 script.py <input_model.onnx> <output_model.onnx>")
            sys.exit(1)

        input_model_path = sys.argv[1]
        output_model_path = sys.argv[2]

        remove_suffix_from_names(input_model_path, output_model_path)
    

    tf2tflite_no_optim.py

    import tensorflow as tf
    import argparse
    import os

    def convert_model(model_path, output_model_path):
        # Plain conversion: no quantization or other optimizations are applied
        converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
        tflite_model = converter.convert()

        with open(output_model_path, "wb") as f:
            f.write(tflite_model)
        print(f"Successfully converted and saved to {output_model_path}")

    def main():
        parser = argparse.ArgumentParser(description="Convert a TensorFlow SavedModel to a TFLite model without optimizations.")
        parser.add_argument("-m", "--model_path", type=str, required=True, help="Path to the input TensorFlow SavedModel directory.")
        parser.add_argument("-o", "--output_path", type=str, help="Path to save the converted TFLite model. If not specified, defaults to the same directory as the input model.")

        args = parser.parse_args()

        if args.output_path is None:
            base_name = os.path.basename(os.path.normpath(args.model_path))
            output_model_path = os.path.join(os.path.dirname(args.model_path), f"{base_name}.tflite")
        else:
            output_model_path = args.output_path

        convert_model(args.model_path, output_model_path)

    if __name__ == "__main__":
        main()
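
    As a final smoke test (a sketch; the exact layout and dtype depend on how onnx2tf arranged the inputs), the fixed model can be loaded and invoked with a zero tensor of whatever shape it reports:

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path="movenet_multi_nchw_output_fixed.tflite")
    interpreter.allocate_tensors()
    inp = interpreter.get_input_details()[0]
    print(inp["shape"], inp["dtype"])

    # Feed a zero tensor matching the reported fixed shape, then run once
    interpreter.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=inp["dtype"]))
    interpreter.invoke()
    for out in interpreter.get_output_details():
        print(out["name"], interpreter.get_tensor(out["index"]).shape)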
    
    

    requirements.txt

    absl-py==2.2.1
    astunparse==1.6.3
    cachetools==5.5.2
    certifi==2025.1.31
    charset-normalizer==3.4.1
    coloredlogs==15.0.1
    flatbuffers==1.12
    gast==0.4.0
    google-auth==2.38.0
    google-auth-oauthlib==0.4.6
    google-pasta==0.2.0
    grpcio==1.71.0
    h5py==3.13.0
    humanfriendly==10.0
    idna==3.10
    keras==2.9.0
    Keras-Preprocessing==1.1.2
    libclang==18.1.1
    Markdown==3.7
    MarkupSafe==3.0.2
    mpmath==1.3.0
    numpy==1.26.4
    oauthlib==3.2.2
    onnx==1.14.1
    onnx-graphsurgeon==0.5.7
    onnx2tf==1.26.9
    onnxruntime==1.21.0
    opt_einsum==3.4.0
    packaging==24.2
    protobuf==3.20.3
    psutil==7.0.0
    pyasn1==0.6.1
    pyasn1_modules==0.4.2
    requests==2.32.3
    requests-oauthlib==2.0.0
    rsa==4.9
    six==1.17.0
    sng4onnx==1.0.4
    sympy==1.13.3
    tensorboard==2.9.0
    tensorboard-data-server==0.6.1
    tensorboard-plugin-wit==1.8.1
    tensorflow==2.9.0
    tensorflow-estimator==2.9.0
    tensorflow-hub==0.16.1
    tensorflow-io-gcs-filesystem==0.37.1
    tensorflow-neuron==1.0
    termcolor==3.0.0
    tf-keras==2.14.1
    tf2onnx==1.13.0
    typing_extensions==4.13.0
    urllib3==2.3.0
    Werkzeug==3.1.3
    wrapt==1.17.2