Error when converting PyTorch model: NotImplementedError: Image output 'colorOutput' has symbolic dimensions in its shape
Problem
I am trying to convert a super-resolution model to an mlmodel. I want to get an mlmodel with a flexible image input whose output is also an image. (Why should it be flexible? Because for any w×h input image, a scale-2 model should output a 2w×2h image, e.g. a 100×80 input yields a 200×160 output, rather than only handling a fixed input shape like (1, 3, 256, 256).)
The code is as follows:
torch_model = model
shape = (1, 3, 256, 256)
shape2 = (1, 3, 257, 257)
# Flexible input: height and width may range from 25 to 1080 pixels.
input_shape = ct.Shape(shape=(1,
                              3,
                              ct.RangeDim(lower_bound=25, upper_bound=1080, default=256),
                              ct.RangeDim(lower_bound=25, upper_bound=1080, default=256)))
pam_model_traced = torch.jit.trace(torch_model, torch.rand(*shape))  # trace the model
# convert to mlmodel
pam_model_ml_flexable = ct.convert(
    pam_model_traced,
    inputs=[ct.ImageType(name="colorImage", shape=input_shape, color_layout=ct.colorlayout.RGB)],
    outputs=[ct.ImageType(name="colorOutput", color_layout=ct.colorlayout.RGB)],
)
# pam_model_ml_flexable = ct.convert(
#     pam_model_traced,
#     inputs=[ct.TensorType(name="colorImage", shape=input_shape)],
#     outputs=[ct.TensorType(name="colorOutput")],
# )
print(pam_model_ml_flexable.get_spec().description)
Running this raises the error: "NotImplementedError: Image output 'colorOutput' has symbolic dimensions in its shape"
What is the reason for this error? I hope someone can explain it. Thanks a lot!
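For comparison, the conversion goes through when the output shape is concrete. Here is a sketch (untested, reusing pam_model_traced from above) that drops the RangeDims, so the image output has no symbolic dimensions:
# With a fixed input shape, the output shape is concrete, so the
# ImageType output passes validation and conversion succeeds.
pam_model_ml_fixed = ct.convert(
    pam_model_traced,
    inputs=[ct.ImageType(name="colorImage", shape=(1, 3, 256, 256),
                         color_layout=ct.colorlayout.RGB)],
    outputs=[ct.ImageType(name="colorOutput", color_layout=ct.colorlayout.RGB)],
)
The commented-out TensorType variant above also converts, since tensor (MLMultiArray) outputs may have symbolic dimensions; judging from the stack trace, the check in backend/nn/load.py applies only to image outputs.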
Stack Trace
NotImplementedError Traceback (most recent call last)
Cell In[17], line 15
10 pam_model_traced = torch.jit.trace(torch_model,torch.rand(*shape)) #trace
13 # convert t0 mlmodel
---> 15 pam_model_ml_flexable = ct.convert(
16 pam_model_traced,
17 inputs=[ct.ImageType(name="colorImage",shape=input_shape,color_layout=ct.colorlayout.RGB,)],
18 outputs=[ct.ImageType(name="colorOutput",color_layout=ct.colorlayout.RGB,)]
19 )
21 # pam_model_ml_flexable = ct.convert(
22 # pam_model_traced,
23 # inputs=[ct.TensorType(name="colorImage",shape=input_shape)],
24 # outputs=[ct.TensorType(name="colorOutput")]
25 # )
26 print(pam_model_ml_flexable.get_spec().description)
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/_converters_entry.py:492, in convert(model, source, inputs, outputs, classifier_config, minimum_deployment_target, convert_to, compute_precision, skip_model_load, compute_units, package_dir, debug, pass_pipeline)
489 if specification_version is None:
490 specification_version = _set_default_specification_version(exact_target)
--> 492 mlmodel = mil_convert(
493 model,
494 convert_from=exact_source,
495 convert_to=exact_target,
496 inputs=inputs,
497 outputs=outputs_as_tensor_or_image_types, # None or list[ct.ImageType/ct.TensorType]
498 classifier_config=classifier_config,
499 skip_model_load=skip_model_load,
500 compute_units=compute_units,
501 package_dir=package_dir,
502 debug=debug,
503 specification_version=specification_version,
504 main_pipeline=pass_pipeline,
505 )
507 if exact_target == 'milinternal':
508 return mlmodel # Returns the MIL program
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/mil/converter.py:188, in mil_convert(model, convert_from, convert_to, compute_units, **kwargs)
149 @_profile
150 def mil_convert(
151 model,
(...)
155 **kwargs
156 ):
157 """
158 Convert model from a specified frontend `convert_from` to a specified
159 converter backend `convert_to`.
(...)
186 See `coremltools.converters.convert`
187 """
--> 188 return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/mil/converter.py:212, in _mil_convert(model, convert_from, convert_to, registry, modelClass, compute_units, **kwargs)
209 weights_dir = _tempfile.TemporaryDirectory()
210 kwargs["weights_dir"] = weights_dir.name
--> 212 proto, mil_program = mil_convert_to_proto(
213 model,
214 convert_from,
215 convert_to,
216 registry,
217 **kwargs
218 )
220 _reset_conversion_state()
222 if convert_to == 'milinternal':
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/mil/converter.py:303, in mil_convert_to_proto(model, convert_from, convert_to, converter_registry, main_pipeline, **kwargs)
298 raise NotImplementedError(
299 f'Backend converter "{convert_to}" not implemented, must be '
300 f"one of: {list(converter_registry.backends.keys())}"
301 )
302 backend_converter = backend_converter_type()
--> 303 out = backend_converter(prog, **kwargs)
305 return out, prog
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/mil/converter.py:119, in NNProtoBackend.__call__(self, *args, **kwargs)
116 def __call__(self, *args, **kwargs):
117 from .backend.nn.load import load
--> 119 return load(*args, **kwargs)
File ~/miniconda3/envs/super-image/lib/python3.8/site-packages/coremltools/converters/mil/backend/nn/load.py:288, in load(prog, **kwargs)
286 raise ValueError("Variable rank model outputs, that are ImageTypes, are not supported")
287 if any([is_symbolic(d) for d in shape]):
--> 288 raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
289 format(var.name))
290 _validate_image_input_output_shapes(output_types[i].color_layout, shape, var.name, is_input=False)
291 clr_space = _get_colorspace_enum(output_types[i].color_layout)
NotImplementedError: Image output 'colorOutput' has symbolic dimensions in its shape
To Reproduce This Error
- pip install super-image
- run the following code:
from super_image import EdsrModel, ImageLoader,PanModel
from PIL import Image
import coremltools as ct
import requests
import torch
# url = 'https://paperswithcode.com/media/datasets/Set5-0000002728-07a9793f_zA3bDjj.jpg'
# image = Image.open("./my.jpg")
model = PanModel.from_pretrained('eugenesiow/pan-bam', scale=2)
# inputs = ImageLoader.load_image(image)
# preds = model(inputs)
#
# ImageLoader.save_image(preds, './my_scaled_2x.png')
# ImageLoader.save_compare(inputs, preds, './my_scaled_2x_compare.png')
model.eval()
torch_model = model
shape = (1, 3, 256, 256)
shape2 = (1, 3, 257, 257)
input_shape = ct.Shape(shape=(1,
                              3,
                              ct.RangeDim(lower_bound=25, upper_bound=1080, default=256),
                              ct.RangeDim(lower_bound=25, upper_bound=1080, default=256)))
pam_model_traced = torch.jit.trace(torch_model, torch.rand(*shape))  # trace the model
# convert to mlmodel
pam_model_ml_flexable = ct.convert(
    pam_model_traced,
    inputs=[ct.ImageType(name="colorImage", shape=input_shape, color_layout=ct.colorlayout.RGB)],
    outputs=[ct.ImageType(name="colorOutput", color_layout=ct.colorlayout.RGB)],
)
# pam_model_ml_flexable = ct.convert(
#     pam_model_traced,
#     inputs=[ct.TensorType(name="colorImage", shape=input_shape)],
#     outputs=[ct.TensorType(name="colorOutput")],
# )
print(pam_model_ml_flexable.get_spec().description)
Environment
coremltools==6.3 torch==1.9.0 torchvision==0.10.0 super-image==0.1.7
Can anybody have a look at this issue?
For security reasons I'm not able to load untrusted models. Can you give us a simple self-contained way to reproduce the issue?
I created a colab notebook, you can reproduce the issue in this notebook.
https://colab.research.google.com/drive/1LFJWLDgiex52M0OK-PcADFtPChbJUaua?usp=sharing
Here is a minimal example to reproduce the issue:
import torch
import coremltools as ct

class Net(torch.nn.Module):
    def forward(self, x):
        return x + x

x = torch.rand(1, 3, 256, 256)
m = torch.jit.trace(Net(), x)

input_shape = ct.Shape(shape=(
    1,
    3,
    ct.RangeDim(lower_bound=25, upper_bound=1080, default=256),
    ct.RangeDim(lower_bound=25, upper_bound=1080, default=256))
)

ct.convert(
    m,
    inputs=[ct.ImageType(shape=input_shape)],
    outputs=[ct.ImageType(name="colorOutput", color_layout=ct.colorlayout.RGB)],
    convert_to="mlprogram",
)
I suspect the issue is that the Core ML Framework does not allow models that output images with variable sizes.
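If so, a possible workaround for the neuralnetwork backend is to convert with a fixed shape and patch the spec afterwards. This is only a sketch and untested: it assumes flexible_shape_utils.update_image_size_range accepts output features as well as inputs, and whether the model actually produces correct results at other sizes depends on its layers:
import torch
import coremltools as ct
from coremltools.models.neural_network import flexible_shape_utils

class Net(torch.nn.Module):
    def forward(self, x):
        return x + x

m = torch.jit.trace(Net(), torch.rand(1, 3, 256, 256))

# Convert with a concrete shape so the image output passes validation.
mlmodel = ct.convert(
    m,
    inputs=[ct.ImageType(name="colorImage", shape=(1, 3, 256, 256),
                         color_layout=ct.colorlayout.RGB)],
    outputs=[ct.ImageType(name="colorOutput", color_layout=ct.colorlayout.RGB)],
    convert_to="neuralnetwork",
)

# After conversion, annotate both image features with a size range.
spec = mlmodel.get_spec()
size_range = flexible_shape_utils.NeuralNetworkImageSizeRange()
size_range.add_height_range((25, 1080))
size_range.add_width_range((25, 1080))
flexible_shape_utils.update_image_size_range(spec, feature_name="colorImage",
                                             size_range=size_range)
flexible_shape_utils.update_image_size_range(spec, feature_name="colorOutput",
                                             size_range=size_range)
mlmodel_flexible = ct.models.MLModel(spec)
This does not help with the mlprogram backend, though, so native support for variable-size image outputs would still be valuable.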
Will you consider supporting this feature, i.e., allowing models that output images with variable sizes?
Any updates on this issue? It would be really useful to be able to convert models that output images with variable shapes.