
ClassifierConfig prevents model compression

lukasugar opened this issue 1 year ago · 3 comments

🐞Describing the bug

I have a PyTorch model which I'm able to convert to Core ML and compress. If I add a ClassifierConfig during conversion, I'm unable to compress the model. The error is: Op "identity_1" (op_type: identity) Input x="classLabel_probs" expects list, tensor, or scalar but got dict[str,fp64]

Stack Trace

	"name": "ValueError",
	"message": "Op \"identity_1\" (op_type: identity) Input x=\"classLabel_probs\" expects list, tensor, or scalar but got dict[str,fp64]",
	"stack": "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)\nCell \u001b[0;32mIn[162], line 2\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[39m# Quantizing model\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m compress_coreml_program(output_base_path\u001b[39m=\u001b[39;49moutput_folder_path, model_path\u001b[39m=\u001b[39;49mcurrent_model_path)\n\nFile \u001b[0;32m~/Documents/repos/ml-explorations/object_detection/classification_training/../../clip/utils/compression_utils.py:83\u001b[0m, in \u001b[0;36mcompress_coreml_program\u001b[0;34m(output_base_path, model_path, model)\u001b[0m\n\u001b[1;32m     80\u001b[0m \u001b[39mfor\u001b[39;00m interpolation_mode \u001b[39min\u001b[39;00m interpolation_modes:\n\u001b[1;32m     81\u001b[0m     \u001b[39mprint\u001b[39m(\u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39mCompressing model to 8 bits using \u001b[39m\u001b[39m{\u001b[39;00minterpolation_mode\u001b[39m}\u001b[39;00m\u001b[39m interpolation mode\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m---> 83\u001b[0m     compressed_model \u001b[39m=\u001b[39m ct\u001b[39m.\u001b[39;49mcompression_utils\u001b[39m.\u001b[39;49maffine_quantize_weights(model, mode \u001b[39m=\u001b[39;49m interpolation_mode)\n\u001b[1;32m     84\u001b[0m     out_path \u001b[39m=\u001b[39m os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mjoin(output_base_path, \u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39maffine_\u001b[39m\u001b[39m{\u001b[39;00minterpolation_mode\u001b[39m}\u001b[39;00m\u001b[39m_model.mlpackage\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m     85\u001b[0m     os\u001b[39m.\u001b[39mmakedirs(os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mdirname(out_path), exist_ok\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/models/ml_program/compression_utils.py:243\u001b[0m, in \u001b[0;36maffine_quantize_weights\u001b[0;34m(mlmodel, mode, op_selector, dtype)\u001b[0m\n\u001b[1;32m    241\u001b[0m     op_selector \u001b[39m=\u001b[39m _default_op_selector\n\u001b[1;32m    242\u001b[0m affine_weight_quantizer \u001b[39m=\u001b[39m _WeightAffineQuantizer(fake_compression\u001b[39m=\u001b[39m\u001b[39mFalse\u001b[39;00m, mode\u001b[39m=\u001b[39mmode, op_selector\u001b[39m=\u001b[39mop_selector, dtype\u001b[39m=\u001b[39mdtype)\n\u001b[0;32m--> 243\u001b[0m \u001b[39mreturn\u001b[39;00m _apply_graph_pass(mlmodel, affine_weight_quantizer)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/models/ml_program/compression_utils.py:64\u001b[0m, in \u001b[0;36m_apply_graph_pass\u001b[0;34m(mlmodel, graph_pass)\u001b[0m\n\u001b[1;32m     61\u001b[0m graph_pass\u001b[39m.\u001b[39mapply(prog)\n\u001b[1;32m     63\u001b[0m \u001b[39m# convert the pymil program back to mlmodel\u001b[39;00m\n\u001b[0;32m---> 64\u001b[0m compressed_mlmodel \u001b[39m=\u001b[39m _mil_convert(\n\u001b[1;32m     65\u001b[0m     prog,\n\u001b[1;32m     66\u001b[0m     convert_to\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mmlprogram\u001b[39;49m\u001b[39m\"\u001b[39;49m,\n\u001b[1;32m     67\u001b[0m     convert_from\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mmilinternal\u001b[39;49m\u001b[39m\"\u001b[39;49m,\n\u001b[1;32m     68\u001b[0m     
specification_version\u001b[39m=\u001b[39;49mspecification_version,\n\u001b[1;32m     69\u001b[0m     compute_units\u001b[39m=\u001b[39;49mmlmodel\u001b[39m.\u001b[39;49mcompute_unit,\n\u001b[1;32m     70\u001b[0m     model_description\u001b[39m=\u001b[39;49mmodel_spec\u001b[39m.\u001b[39;49mdescription,\n\u001b[1;32m     71\u001b[0m )\n\u001b[1;32m     72\u001b[0m \u001b[39mreturn\u001b[39;00m compressed_mlmodel\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/converter.py:188\u001b[0m, in \u001b[0;36mmil_convert\u001b[0;34m(model, convert_from, convert_to, compute_units, **kwargs)\u001b[0m\n\u001b[1;32m    149\u001b[0m \u001b[39m@_profile\u001b[39m\n\u001b[1;32m    150\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mmil_convert\u001b[39m(\n\u001b[1;32m    151\u001b[0m     model,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    155\u001b[0m     \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs\n\u001b[1;32m    156\u001b[0m ):\n\u001b[1;32m    157\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m    158\u001b[0m \u001b[39m    Convert model from a specified frontend `convert_from` to a specified\u001b[39;00m\n\u001b[1;32m    159\u001b[0m \u001b[39m    converter backend `convert_to`.\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    186\u001b[0m \u001b[39m        See `coremltools.converters.convert`\u001b[39;00m\n\u001b[1;32m    187\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[0;32m--> 188\u001b[0m     \u001b[39mreturn\u001b[39;00m _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/converter.py:212\u001b[0m, in \u001b[0;36m_mil_convert\u001b[0;34m(model, convert_from, convert_to, registry, modelClass, compute_units, **kwargs)\u001b[0m\n\u001b[1;32m    209\u001b[0m     weights_dir \u001b[39m=\u001b[39m _tempfile\u001b[39m.\u001b[39mTemporaryDirectory()\n\u001b[1;32m    210\u001b[0m     kwargs[\u001b[39m\"\u001b[39m\u001b[39mweights_dir\u001b[39m\u001b[39m\"\u001b[39m] \u001b[39m=\u001b[39m weights_dir\u001b[39m.\u001b[39mname\n\u001b[0;32m--> 212\u001b[0m proto, mil_program \u001b[39m=\u001b[39m mil_convert_to_proto(\n\u001b[1;32m    213\u001b[0m                         model,\n\u001b[1;32m    214\u001b[0m                         convert_from,\n\u001b[1;32m    215\u001b[0m                         convert_to,\n\u001b[1;32m    216\u001b[0m                         registry,\n\u001b[1;32m    217\u001b[0m                         \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs\n\u001b[1;32m    218\u001b[0m                      )\n\u001b[1;32m    220\u001b[0m _reset_conversion_state()\n\u001b[1;32m    222\u001b[0m \u001b[39mif\u001b[39;00m convert_to \u001b[39m==\u001b[39m \u001b[39m'\u001b[39m\u001b[39mmilinternal\u001b[39m\u001b[39m'\u001b[39m:\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/converter.py:288\u001b[0m, in \u001b[0;36mmil_convert_to_proto\u001b[0;34m(model, convert_from, convert_to, converter_registry, main_pipeline, **kwargs)\u001b[0m\n\u001b[1;32m    285\u001b[0m prog \u001b[39m=\u001b[39m frontend_converter(model, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[1;32m    286\u001b[0m 
PipelineManager\u001b[39m.\u001b[39mapply_pipeline(prog, frontend_pipeline)\n\u001b[0;32m--> 288\u001b[0m PipelineManager\u001b[39m.\u001b[39;49mapply_pipeline(prog, main_pipeline)\n\u001b[1;32m    290\u001b[0m prog\u001b[39m.\u001b[39m_check_invalid_tensor_rank()\n\u001b[1;32m    292\u001b[0m \u001b[39mif\u001b[39;00m convert_to \u001b[39m==\u001b[39m \u001b[39m'\u001b[39m\u001b[39mmilinternal\u001b[39m\u001b[39m'\u001b[39m:\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/pass_pipeline.py:378\u001b[0m, in \u001b[0;36mPipelineManager.apply_pipeline\u001b[0;34m(prog, pass_pipeline)\u001b[0m\n\u001b[1;32m    376\u001b[0m     graph_pass \u001b[39m=\u001b[39m PASS_REGISTRY[pass_name]\n\u001b[1;32m    377\u001b[0m     graph_pass\u001b[39m.\u001b[39mset_options(pass_options)\n\u001b[0;32m--> 378\u001b[0m     graph_pass(prog)\n\u001b[1;32m    379\u001b[0m     prog\u001b[39m.\u001b[39mvalidate()\n\u001b[1;32m    380\u001b[0m logger\u001b[39m.\u001b[39mdebug(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mProgram after \u001b[39m\u001b[39m{\u001b[39;00mpass_pipeline\u001b[39m}\u001b[39;00m\u001b[39m pipeline:\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39m{\u001b[39;00mprog\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/graph_pass.py:55\u001b[0m, in \u001b[0;36mAbstractGraphPass.__call__\u001b[0;34m(self, prog)\u001b[0m\n\u001b[1;32m     53\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__call__\u001b[39m(\u001b[39mself\u001b[39m, prog: Program):\n\u001b[1;32m     54\u001b[0m     \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m prog\u001b[39m.\u001b[39mskip_all_passes:\n\u001b[0;32m---> 55\u001b[0m         \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mapply(prog)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py:1735\u001b[0m, in \u001b[0;36mreduce_transposes.apply\u001b[0;34m(self, prog)\u001b[0m\n\u001b[1;32m   1733\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mapply\u001b[39m(\u001b[39mself\u001b[39m, prog):\n\u001b[1;32m   1734\u001b[0m     \u001b[39mfor\u001b[39;00m f \u001b[39min\u001b[39;00m prog\u001b[39m.\u001b[39mfunctions\u001b[39m.\u001b[39mvalues():\n\u001b[0;32m-> 1735\u001b[0m         \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_reduce_transposes_block(f)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py:1753\u001b[0m, in \u001b[0;36mreduce_transposes._reduce_transposes_block\u001b[0;34m(block)\u001b[0m\n\u001b[1;32m   1750\u001b[0m         \u001b[39mreturn\u001b[39;00m\n\u001b[1;32m   1752\u001b[0m \u001b[39mwith\u001b[39;00m block:\n\u001b[0;32m-> 1753\u001b[0m     opt_transposes \u001b[39m=\u001b[39m _TransposeOptimization(block)\n\u001b[1;32m   1754\u001b[0m     opt_transposes\u001b[39m.\u001b[39mblock_traversal()\n\u001b[1;32m   1755\u001b[0m     opt_transposes\u001b[39m.\u001b[39mapply_transform()\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py:757\u001b[0m, in \u001b[0;36m_TransposeOptimization.__init__\u001b[0;34m(self, 
block)\u001b[0m\n\u001b[1;32m    754\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39moutput_sink_ops \u001b[39m=\u001b[39m []\n\u001b[1;32m    756\u001b[0m \u001b[39m# We modify the graph temporarily for outputs\u001b[39;00m\n\u001b[0;32m--> 757\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_add_output_sinks()\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py:766\u001b[0m, in \u001b[0;36m_TransposeOptimization._add_output_sinks\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    764\u001b[0m \u001b[39mfor\u001b[39;00m out_var \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mblock\u001b[39m.\u001b[39moutputs:\n\u001b[1;32m    765\u001b[0m     \u001b[39mif\u001b[39;00m out_var \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m output_sinks_var:\n\u001b[0;32m--> 766\u001b[0m         out_sink \u001b[39m=\u001b[39m mb\u001b[39m.\u001b[39;49midentity(x\u001b[39m=\u001b[39;49mout_var)\n\u001b[1;32m    767\u001b[0m         output_sinks_var[out_var] \u001b[39m=\u001b[39m out_sink\n\u001b[1;32m    768\u001b[0m     \u001b[39melse\u001b[39;00m:\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/ops/registry.py:182\u001b[0m, in \u001b[0;36mSSAOpRegistry.register_op.<locals>.class_wrapper.<locals>.add_op\u001b[0;34m(cls, **kwargs)\u001b[0m\n\u001b[1;32m    179\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m    180\u001b[0m     op_cls_to_add \u001b[39m=\u001b[39m op_reg[op_type]\n\u001b[0;32m--> 182\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39;49m\u001b[39m.\u001b[39;49m_add_op(op_cls_to_add, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/builder.py:166\u001b[0m, in \u001b[0;36mBuilder._add_op\u001b[0;34m(cls, op_cls, **kwargs)\u001b[0m\n\u001b[1;32m    161\u001b[0m kwargs \u001b[39m=\u001b[39m {k: v \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39misinstance\u001b[39m(v, (\u001b[39mlist\u001b[39m, \u001b[39mtuple\u001b[39m)) \u001b[39melse\u001b[39;00m v[:] \u001b[39mfor\u001b[39;00m k, v \u001b[39min\u001b[39;00m kwargs\u001b[39m.\u001b[39mitems() \u001b[39mif\u001b[39;00m v \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m}\n\u001b[1;32m    162\u001b[0m kwargs\u001b[39m.\u001b[39mupdate(\u001b[39mcls\u001b[39m\u001b[39m.\u001b[39m_create_vars(\n\u001b[1;32m    163\u001b[0m     input_spec\u001b[39m=\u001b[39mop_cls\u001b[39m.\u001b[39minput_spec,\n\u001b[1;32m    164\u001b[0m     op_name\u001b[39m=\u001b[39mkwargs[\u001b[39m\"\u001b[39m\u001b[39mname\u001b[39m\u001b[39m\"\u001b[39m], before_op\u001b[39m=\u001b[39mbefore_op,\n\u001b[1;32m    165\u001b[0m     candidate_kv\u001b[39m=\u001b[39mkwargs))\n\u001b[0;32m--> 166\u001b[0m new_op \u001b[39m=\u001b[39m op_cls(\u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    168\u001b[0m \u001b[39m# Initialize optional input Vars if it wasn't in kwargs\u001b[39;00m\n\u001b[1;32m    169\u001b[0m default_inputs \u001b[39m=\u001b[39m new_op\u001b[39m.\u001b[39mdefault_inputs()\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/operation.py:187\u001b[0m, in 
\u001b[0;36mOperation.__init__\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m    184\u001b[0m \u001b[39m# Set inputs from kwargs\u001b[39;00m\n\u001b[1;32m    185\u001b[0m input_kv \u001b[39m=\u001b[39m {k: v \u001b[39mfor\u001b[39;00m k, v \u001b[39min\u001b[39;00m kwargs\u001b[39m.\u001b[39mitems()\n\u001b[1;32m    186\u001b[0m             \u001b[39mif\u001b[39;00m k \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_input_types \u001b[39mand\u001b[39;00m v \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m}\n\u001b[0;32m--> 187\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_validate_and_set_inputs(input_kv)\n\u001b[1;32m    188\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_ensure_required_inputs()\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/operation.py:496\u001b[0m, in \u001b[0;36mOperation._validate_and_set_inputs\u001b[0;34m(self, input_kvs, no_check_var_types)\u001b[0m\n\u001b[1;32m    493\u001b[0m         \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(msg\u001b[39m.\u001b[39mformat(v_new\u001b[39m.\u001b[39msym_type, v_old\u001b[39m.\u001b[39msym_type))\n\u001b[1;32m    494\u001b[0m     v_old\u001b[39m.\u001b[39mremove_child_op(op, no_check_var_types)\n\u001b[0;32m--> 496\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49minput_spec\u001b[39m.\u001b[39;49mvalidate_inputs(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mname, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mop_type, input_kvs)\n\u001b[1;32m    498\u001b[0m \u001b[39mfor\u001b[39;00m name, var \u001b[39min\u001b[39;00m input_kvs\u001b[39m.\u001b[39mitems():\n\u001b[1;32m    499\u001b[0m     \u001b[39m# Remove this operation itself from existing input\u001b[39;00m\n\u001b[1;32m    500\u001b[0m     \u001b[39m# Var's child_ops\u001b[39;00m\n\u001b[1;32m    501\u001b[0m     existing_input_var \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_input_vars[name]\n\nFile \u001b[0;32m/opt/homebrew/Caskroom/miniforge/base/envs/classification_env/lib/python3.10/site-packages/coremltools/converters/mil/mil/input_type.py:162\u001b[0m, in \u001b[0;36mInputSpec.validate_inputs\u001b[0;34m(self, op_name, op_type, candidate_kvs)\u001b[0m\n\u001b[1;32m    158\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39misinstance\u001b[39m(var, InternalVar) \u001b[39mand\u001b[39;00m \\\n\u001b[1;32m    159\u001b[0m     \u001b[39mnot\u001b[39;00m input_type\u001b[39m.\u001b[39mis_compatible(var):\n\u001b[1;32m    160\u001b[0m     msg \u001b[39m=\u001b[39m msg_prefix \u001b[39m+\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mInput \u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m=\u001b[39m\u001b[39m\\\"\u001b[39;00m\u001b[39m{}\u001b[39;00m\u001b[39m\\\"\u001b[39;00m\u001b[39m expects \u001b[39m\u001b[39m\"\u001b[39m \u001b[39m+\u001b[39m\\\n\u001b[1;32m    161\u001b[0m             \u001b[39m\"\u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m but got \u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m\"\u001b[39m\n\u001b[0;32m--> 162\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(msg\u001b[39m.\u001b[39mformat(name, var\u001b[39m.\u001b[39mname, input_type\u001b[39m.\u001b[39mtype_str,\n\u001b[1;32m    163\u001b[0m                 var\u001b[39m.\u001b[39msym_type\u001b[39m.\u001b[39m__type_info__()))\n\n\u001b[0;31mValueError\u001b[0m: Op \"identity_1\" (op_type: identity) Input 
x=\"classLabel_probs\" expects list, tensor, or scalar but got dict[str,fp64]"

To Reproduce

import timm
import torch
import coremltools as ct

# Create model
model = timm.create_model('mobilenetv3_small_100', pretrained=True, num_classes=3)

# Example input
example_input = torch.randn((1,3,224,224))

# Trace model
traced_model = torch.jit.trace(model, example_input)

classifier_config = ct.ClassifierConfig(class_labels=['dog','cat','unicorn'])
ct_model = ct.convert(
    traced_model,
    inputs=[ct.ImageType(name="image", shape=example_input.shape, scale=scale, bias=bias)],
    outputs=[ct.TensorType(name="probabilities")],
    convert_to='mlprogram',
    classifier_config=classifier_config
)

# Compressing the model -> Raises the error
compressed_model = ct.compression_utils.affine_quantize_weights(ct_model, mode = 'linear')
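
For context on the dict[str,fp64] in the error: converting with a ClassifierConfig makes the Core ML model expose a label-to-probability dictionary output in addition to the predicted label, and it is that dictionary-typed value the compression pass later tries to feed through an identity op. A quick way to see the output types on the converted model (an illustrative sketch, not part of the original report; the exact output names may differ):

# Inspect the converted classifier's output feature types
spec = ct_model.get_spec()
for out in spec.description.output:
    # Prints something like "probabilities dictionaryType" and "classLabel stringType"
    print(out.name, out.type.WhichOneof("Type"))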

System environment:

  • coremltools version: 6.3.0
  • OS: macOS
  • Any other relevant version information (e.g. PyTorch or TensorFlow version): PyTorch 2.0

lukasugar · Jul 07 '23

scale and bias are not defined in the snippet, so I used 1 and 0.

Here is runnable code showing that it is the addition of a classifier config that causes affine_quantize_weights to fail.

import timm
import torch
import coremltools as ct

# Create model
model = timm.create_model('mobilenetv3_small_100', pretrained=True, num_classes=3)

# Example input
example_input = torch.randn((1,3,224,224))

# Trace model
traced_model = torch.jit.trace(model, example_input)

ct_model = ct.convert(
    traced_model,
    inputs=[ct.ImageType(name="image", shape=example_input.shape, scale=1, bias=0)],
    outputs=[ct.TensorType(name="probabilities")],
    convert_to='mlprogram',
)

# This works
compressed_model = ct.compression_utils.affine_quantize_weights(ct_model, mode = 'linear')



classifier_config = ct.ClassifierConfig(class_labels=['dog','cat','unicorn'])
ct_model = ct.convert(
    traced_model,
    inputs=[ct.ImageType(name="image", shape=example_input.shape, scale=1, bias=0)],
    outputs=[ct.TensorType(name="probabilities")],
    convert_to='mlprogram',
    classifier_config=classifier_config
)

# This fails
compressed_model = ct.compression_utils.affine_quantize_weights(ct_model, mode = 'linear')

Using the tip of main gives the following error:

---> 11 compressed_model = ct.compression_utils.affine_quantize_weights(ct_model, mode = 'linear')

File /Volumes/DevData/workspace/coremltools/coremltools/models/_deprecation.py:28, in deprecated.<locals>.decorator_deprecation_warning.<locals>.wrapped(*args, **kwargs)
     26     msg += f"; {suffix}"
     27 warnings.warn(msg, category=FutureWarning)
---> 28 return obj(*args, **kwargs)

File /Volumes/DevData/workspace/coremltools/coremltools/models/ml_program/compression_utils.py:46, in affine_quantize_weights(mlmodel, mode, op_selector, dtype)
     44 op_config = _OpLinearQuantizerConfig(mode=mode, dtype=dtype, weight_threshold=None)
     45 config = _OptimizationConfig(global_config=op_config, is_deprecated=True, op_selector=op_selector)
---> 46 return _linear_quantize_weights(mlmodel, config)

File /Volumes/DevData/workspace/coremltools/coremltools/optimize/coreml/_post_training_quantization.py:172, in linear_quantize_weights(mlmodel, config)
     62 """
     63 Utility function to convert a float precision MLModel of type ``mlprogram``, which uses
     64 float-precision weights, into a compressed MLModel that uses 8-bit weights. This is
   (...)
    168 
    169 """
    171 linear_weight_quantizer = _linear_quantize_weights(config, fake_compression=False)
--> 172 return _apply_graph_pass(mlmodel, linear_weight_quantizer)

File /Volumes/DevData/workspace/coremltools/coremltools/optimize/coreml/_post_training_quantization.py:51, in _apply_graph_pass(mlmodel, graph_pass)
     48 graph_pass.apply(prog)
     50 # convert the pymil program back to mlmodel
---> 51 compressed_mlmodel = _mil_convert(
     52     prog,
     53     convert_to="mlprogram",
     54     convert_from="milinternal",
     55     specification_version=specification_version,
     56     compute_units=mlmodel.compute_unit,
     57     model_description=model_spec.description,
     58 )
     59 return compressed_mlmodel

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/converter.py:188, in mil_convert(model, convert_from, convert_to, compute_units, **kwargs)
    149 @_profile
    150 def mil_convert(
    151     model,
   (...)
    155     **kwargs
    156 ):
    157     """
    158     Convert model from a specified frontend `convert_from` to a specified
    159     converter backend `convert_to`.
   (...)
    186         See `coremltools.converters.convert`
    187     """
--> 188     return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/converter.py:212, in _mil_convert(model, convert_from, convert_to, registry, modelClass, compute_units, **kwargs)
    209     weights_dir = _tempfile.TemporaryDirectory()
    210     kwargs["weights_dir"] = weights_dir.name
--> 212 proto, mil_program = mil_convert_to_proto(
    213                         model,
    214                         convert_from,
    215                         convert_to,
    216                         registry,
    217                         **kwargs
    218                      )
    220 _reset_conversion_state()
    222 if convert_to == 'milinternal':

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/converter.py:289, in mil_convert_to_proto(model, convert_from, convert_to, converter_registry, main_pipeline, **kwargs)
    286 prog = frontend_converter(model, **kwargs)
    287 PassPipelineManager.apply_pipeline(prog, frontend_pipeline)
--> 289 PassPipelineManager.apply_pipeline(prog, main_pipeline)
    291 prog._check_invalid_tensor_rank()
    293 if convert_to == 'milinternal':

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/pass_pipeline.py:445, in PassPipelineManager.apply_pipeline(prog, pass_pipeline)
    443     graph_pass = PASS_REGISTRY[pass_name]
    444     graph_pass.set_options(pass_options)
--> 445     graph_pass(prog)
    446     prog.validate()
    447 logger.debug(f"Program after {pass_pipeline} pipeline:\n{prog}")

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/graph_pass.py:51, in AbstractGraphPass.__call__(self, prog)
     49 def __call__(self, prog: Program):
     50     if not prog.skip_all_passes:
---> 51         self.apply(prog)

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/defs/cleanup/const_deduplication.py:67, in const_deduplication.apply(self, prog)
     65 def apply(self, prog) -> None:
     66     for f in prog.functions.values():
---> 67         self._constant_deduplication_block(f)

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/helper.py:60, in block_context_manager.<locals>.wrapper(*args)
     55     raise ValueError(
     56         "The function decorated with block_context_manager must have a Block "
     57         "type argument as the first input."
     58     )
     59 with block:
---> 60     return func(*args)

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/defs/cleanup/const_deduplication.py:75, in const_deduplication._constant_deduplication_block(self, block)
     72     for b in op.blocks:
     73         self._constant_deduplication_block(b)
---> 75 unique2duplicates = self.find_constants(block)
     76 for unique in unique2duplicates:
     77     for duplicate in unique2duplicates[unique]:

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/passes/defs/cleanup/const_deduplication.py:105, in const_deduplication.find_constants(self, block)
    103 if op_type == "const" or op_type in self.CONSTEXPR_OPS:
    104     constant_var = op.outputs[0]
--> 105     shape = constant_var.shape
    107     numel = np.prod(shape)
    108     if numel < self.NUMEL_THRESH:

File /Volumes/DevData/workspace/coremltools/coremltools/converters/mil/mil/var.py:291, in ListVar.shape(self)
    289 @property
    290 def shape(self):
--> 291     raise ValueError("shape not applicable to ListVar '{}'.".format(self.name))

ValueError: shape not applicable to ListVar 'const_0'.
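
As this trace shows, affine_quantize_weights is now just a deprecated wrapper that builds an OptimizationConfig and calls linear_quantize_weights, so the non-deprecated coremltools.optimize.coreml API reaches the same code path. A minimal sketch of the equivalent call on coremltools 7.0+ follows; it is expected to fail the same way while a classifier config is attached:

from coremltools.optimize.coreml import (
    OpLinearQuantizerConfig,
    OptimizationConfig,
    linear_quantize_weights,
)

# Rough equivalent of the deprecated affine_quantize_weights(ct_model, mode='linear')
op_config = OpLinearQuantizerConfig(mode="linear")
config = OptimizationConfig(global_config=op_config)
compressed_model = linear_quantize_weights(ct_model, config)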

TobyRoseman · Jul 07 '23

Thank you, Toby, for making the code more readable!

lukasugar · Jul 08 '23

I have the same problem with version 7.0b1.

AtomicVar · Jul 10 '23