Cannot convert Mask R-CNN model in Detectron2 to Core ML by scripting
🐞 Describe the bug
The Mask R-CNN model in Detectron2 can be converted to TorchScript via torch.jit.script without problems. However, the resulting TorchScript model cannot be converted to Core ML: the conversion fails with a KeyError when coremltools lowers the graph and hits the prim::CreateObject node that constructs the detectron2.structures.image_list.ImageList instance (see the stack trace below).
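Condensed from the full script under "To Reproduce", the failing flow is essentially:

ts_model = scripting_with_instances(model, fields)            # scripting succeeds
mlmodel = ct.converters.convert(ts_model, inputs=ml_inputs)   # raises the KeyError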
Stack Trace
+ ./export2coreml-detectron2-maskrcnn.py --fmt coreml
WARNING:root:Torch version 1.11.0+cu115 has not been tested with coremltools. You may run into unexpected errors. Torch 1.10.2 is the most recent version that has been tested.
Traceback (most recent call last):
File "./export2coreml-detectron2-maskrcnn.py", line 168, in <module>
main()
File "./export2coreml-detectron2-maskrcnn.py", line 158, in main
export_scripting(
File "./export2coreml-detectron2-maskrcnn.py", line 132, in export_scripting
mlmodel = ct.converters.convert(
File "./venv/lib/python3.8/site-packages/coremltools/converters/_converters_entry.py", line 352, in convert
mlmodel = mil_convert(
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/converter.py", line 183, in mil_convert
return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/converter.py", line 210, in _mil_convert
proto, mil_program = mil_convert_to_proto(
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/converter.py", line 273, in mil_convert_to_proto
prog = frontend_converter(model, **kwargs)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/converter.py", line 105, in __call__
return load(*args, **kwargs)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 46, in load
converter = TorchConverter(torchscript, inputs, outputs, cut_at_symbols)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 156, in __init__
raw_graph, params_dict = self._expand_and_optimize_ir(self.torchscript)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 456, in _expand_and_optimize_ir
graph, params_dict = TorchConverter._jit_pass_lower_graph(graph, torchscript)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 400, in _jit_pass_lower_graph
_lower_graph_block(graph)
File "./venv/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 379, in _lower_graph_block
module = getattr(node_to_module_map[_input], attr_name)
KeyError: images.2 defined in (%images.2 : __torch__.detectron2.structures.image_list.ImageList = prim::CreateObject()
)
To Reproduce
Use the attached script:
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import logging
import os
from enum import Enum
from typing import List, Dict, Tuple

import torch
import detectron2
import detectron2.config
from detectron2 import model_zoo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.export import dump_torchscript_IR, scripting_with_instances
from detectron2.modeling import build_model
from detectron2.structures import Boxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
import coremltools as ct


class output_format_t(Enum):
    torchscript = "torchscript"
    coreml = "coreml"


def parse_arguments():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--th',
        dest='confidence_threshold',
        type=float,
        default=0.1,
        metavar="confidence_threshold",
        help='[default:%(default)s]'
    )
    parser.add_argument(
        "--fmt",
        dest="export_format",
        required=True,
        type=output_format_t,
        help=f"{[e.name for e in output_format_t]}"
    )
    return parser.parse_args()


def export_scripting(
    torch_model,
    fmt: output_format_t,
    outdir: str
):
    assert TORCH_VERSION >= (1, 8)
    # Field annotations required by scripting_with_instances so that
    # detectron2's Instances class can be scripted.
    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": torch.Tensor,
        "pred_boxes": Boxes,
        "scores": torch.Tensor,
        "pred_classes": torch.Tensor,
        "pred_masks": torch.Tensor,
        "pred_keypoints": torch.Tensor,
        "pred_keypoint_heatmaps": torch.Tensor,
    }

    class ScriptableAdapterBase(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.model = torch_model
            self.eval()

    class ScriptableAdapter(ScriptableAdapterBase):
        # Use this adapter to work around https://github.com/pytorch/pytorch/issues/46944
        # by returning dicts instead of Instances. Otherwise the exported model
        # is not deployable.
        def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, torch.Tensor]]:
            instances = self.model.inference(inputs, do_postprocess=False)
            return [i.get_fields() for i in instances]

    model = ScriptableAdapter()
    ts_model = scripting_with_instances(model, fields)
    if fmt == output_format_t.torchscript:
        with PathManager.open(os.path.join(outdir, "model.ts"), "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, outdir)
    elif fmt == output_format_t.coreml:
        ml_inputs = [
            ct.TensorType(
                name="image",
                shape=ct.Shape(shape=(3, ct.RangeDim(), ct.RangeDim())),
                dtype=ct.converters.mil.types.fp32
            ),
            ct.TensorType(
                name="height",
                shape=(1,),
                dtype=ct.converters.mil.types.fp32
            ),
            ct.TensorType(
                name="width",
                shape=(1,),
                dtype=ct.converters.mil.types.fp32
            )
        ]
        # This call raises the KeyError shown in the stack trace above.
        mlmodel = ct.converters.convert(
            ts_model,
            inputs=ml_inputs,
            debug=True
        )
        outpath_model = os.path.join(outdir, f"{fmt.name}.mlmodel")
        mlmodel.save(outpath_model)
    else:
        raise RuntimeError(f"unsupported output format {fmt}")
    return None


def main():
    parsed_args = parse_arguments()
    logging.info(f"{parsed_args=}")
    cfg = detectron2.config.get_cfg()
    cfg.INPUT.FORMAT = 'RGB'
    model_name = "X_101_32x8d_FPN_3x"
    cfg.merge_from_file(model_zoo.get_config_file(f"COCO-InstanceSegmentation/mask_rcnn_{model_name}.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = parsed_args.confidence_threshold  # set threshold for this model
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(f"COCO-InstanceSegmentation/mask_rcnn_{model_name}.yaml")
    cfg.MODEL.DEVICE = "cpu"
    model = build_model(cfg)
    # Load the pretrained weights (downloaded automatically on first run).
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    outdir = f"./playground/exported_nn_models/maskrcnn-scripting/{model_name}"
    os.makedirs(outdir, exist_ok=True)
    export_scripting(
        torch_model=model,
        outdir=outdir,
        fmt=parsed_args.export_format
    )
    cfg_outpath = os.path.join(outdir, "detectron2_cfg.yml")
    with open(cfg_outpath, "w") as _f:
        cfg.dump(stream=_f, indent=2)


if __name__ == '__main__':
    main()
To save the TorchScript model:
./export2coreml-detectron2-maskrcnn.py --fmt torchscript
To convert to Core ML:
./export2coreml-detectron2-maskrcnn.py --fmt coreml
System environment:
- coremltools version: 5.2.0
- OS: Ubuntu 20.04
- Torch version: 1.11.0+cu115

Running with torch 1.10.2 yields the same stack trace.
A related question: in PyTorch the model is called as model([input,]), where input is a dict {key1: val1, key2: val2}. What should the inputs argument of ct.convert() look like in that case? Is it simply [ct.TensorType(shape of val1), ct.TensorType(shape of val2)], or does it need some more nested type?
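Not an answer from the coremltools docs, but a common workaround sketch: ct.convert accepts only a flat list of TensorType/ImageType inputs, so one typically wraps the model in an adapter that takes plain tensors and rebuilds the dict internally. The FlatInputWrapper class and the tensor names below are invented for illustration:

import torch
import coremltools as ct

class FlatInputWrapper(torch.nn.Module):
    # Hypothetical adapter: exposes plain tensors, rebuilds the dict inside.
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, val1: torch.Tensor, val2: torch.Tensor):
        # Reassemble the list-of-dicts interface the underlying model expects.
        return self.model([{"key1": val1, "key2": val2}])

wrapped = FlatInputWrapper(model).eval()
ts = torch.jit.script(wrapped)  # or torch.jit.trace(wrapped, (val1, val2))
mlmodel = ct.convert(
    ts,
    inputs=[
        ct.TensorType(name="val1", shape=val1.shape),
        ct.TensorType(name="val2", shape=val2.shape),
    ],
)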
@braindevices - can you give us a minimal example (i.e. a simple toy network) that reproduces this issue?
@TobyRoseman this is the smallest reproduction I have so far. The model comes from the Detectron2 Model Zoo; the script downloads it automatically when run.
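For anyone trying to distill this further: the stack trace points at the prim::CreateObject node for ImageList, so a toy repro would presumably only need a scripted module that instantiates a custom TorchScript class. A sketch of that idea (untested as a guaranteed repro; the Container and ToyModel names are invented):

import torch
import coremltools as ct

@torch.jit.script
class Container:
    # Minimal custom TorchScript class, analogous to detectron2's ImageList.
    def __init__(self, tensor: torch.Tensor):
        self.tensor = tensor

class ToyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Constructing the class here puts a prim::CreateObject node
        # into the scripted graph, which is where the KeyError originates.
        c = Container(x)
        return c.tensor + 1.0

ts = torch.jit.script(ToyModel())
mlmodel = ct.convert(
    ts,
    inputs=[ct.TensorType(name="x", shape=(1, 3, 4, 4))],
)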