onnx-tensorflow
Converting opset 16 (grid_sample function)
Hello, I converted a PyTorch model that uses the grid_sample function to ONNX. This required a custom build with PyTorch v1.12.0-dev and onnxruntime v1.12.0-dev, exporting at opset=16. See this issue for details: https://github.com/microsoft/onnxruntime/issues/10232
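For context, the export was done roughly like this (a minimal sketch only; the tiny module and tensor names below are placeholders, not my actual network):
import torch
import torch.nn.functional as F

class GridSampleModule(torch.nn.Module):
    # Minimal stand-in for the real network, just to show the export call.
    def forward(self, image, grid):
        return F.grid_sample(image, grid, mode="bilinear", align_corners=False)

model = GridSampleModule().eval()
image = torch.randn(1, 1, 16, 16)
grid = torch.rand(1, 8, 8, 2) * 2 - 1   # normalized sampling grid in [-1, 1]
torch.onnx.export(
    model,
    (image, grid),
    "grid_sample.onnx",
    opset_version=16,  # GridSample export requires opset >= 16
    input_names=["image", "grid"],
    output_names=["output"],
)
But now I can't convert the exported model to TensorFlow format; onnx-tf fails with: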
BackendIsNotSupposedToImplementIt: GreaterOrEqual version 16 is not implemented.
I added an opset 16 version of the GreaterOrEqual op to the onnx_tf source (opset_version.py) and rebuilt it.
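Roughly, the version_16 handler I registered looks like this (a sketch; only the version_16 classmethod is new, the rest mirrors the existing GreaterOrEqual handler in onnx_tf, and opset_version.py is updated to list 16 for this op):
import tensorflow as tf

from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func


@onnx_op("GreaterOrEqual")
@tf_func(tf.greater_equal)
class GreaterOrEqual(BackendHandler):

  @classmethod
  def version_12(cls, node, **kwargs):
    return [cls.make_tensor_from_onnx_node(node, **kwargs)]

  @classmethod
  def version_16(cls, node, **kwargs):
    # The opset 16 revision does not change the operator's semantics,
    # so the opset 12 lowering is reused.
    return [cls.make_tensor_from_onnx_node(node, **kwargs)]
After rebuilding, the conversion fails with a new error: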
/home/user/.local/lib/python3.7/site-packages/onnx_tf/backend_tf_module.py:99 __call__ *
output_ops = self.backend._onnx_node_to_tensorflow_op(onnx_node,
/home/user/.local/lib/python3.7/site-packages/onnx_tf/backend.py:347 _onnx_node_to_tensorflow_op **
return handler.handle(node, tensor_dict=tensor_dict, strict=strict)
/home/user/.local/lib/python3.7/site-packages/onnx_tf/handlers/handler.py:59 handle
return ver_handle(node, **kwargs)
/home/user/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/shape.py:53 version_15
return cls._common(node, **kwargs)
/home/user/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/shape.py:24 _common
x_rank = len(x_shape)
/home/user/.local/lib/python3.7/site-packages/tensorflow/python/framework/ops.py:875 __len__
"shape information.".format(self.name))
TypeError: len is not well defined for symbolic Tensors. (Shape_23:0) Please call `x.shape` rather than `len(x)` for shape information.
Is there any solution?
- Python version: 3.7.10
- ONNX version: 1.11.0
- ONNX-TF version: 1.10.0
- Tensorflow version: 2.6.3
ONNX model: https://drive.google.com/file/d/1Z5LBwva1qLKz9EN0bcMt1SXWAjkhIrv2/view?usp=sharing
Can you solve it?
- replace.json
{ "format_version": 1, "operations": [ { "op_name": "Gather_1436", "param_target": "outputs", "param_name": "onnx::Concat_2347", "post_process_transpose_perm": [0,2,1] } ] } - [experimental] convert
# 10 times onnxsim onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnxsim grid_sample.onnx grid_sample.onnx onnx2tf -i grid_sample.onnx -kat points calibs -prf replace.json - converted tflite https://drive.google.com/file/d/1jUEqHVUby6umktUOPvjvRH3xshSQVB2W/view?usp=share_link
I have no idea what kind of model it is, and I have not checked the operation of the model because it is too complicated.
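If you only want to confirm that the converted tflite loads and runs end to end (this does not validate the numerics), something like the following works; the model_path is an example, use whichever .tflite file onnx2tf actually wrote out:
import numpy as np
import tensorflow as tf

# Load the converted model and inspect its inputs/outputs.
interpreter = tf.lite.Interpreter(model_path="saved_model/grid_sample_float32.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)

# Feed random data with whatever shapes/dtypes the converter produced.
for detail in input_details:
    interpreter.set_tensor(
        detail["index"],
        np.random.rand(*detail["shape"]).astype(detail["dtype"]))
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]["index"]).shape)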
I solved this problem by adding a custom operator to onnx_tf:
Step 1: find your installed onnx_tf ops directory ( /site-packages/onnx_tf/handlers/backend/ ).
Step 2: copy grid_sample.py into that directory (/site-packages/onnx_tf/handlers/backend/grid_sample.py).
grid_sample.py code:
import copy

import tensorflow as tf

from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op


def grid_sample(image, coords, align_corners=False):
  '''Bilinear value sampler using tf.gather_nd.
  Args:
    image: tensor with shape (bs*h*w, h/i**2, w/i**2, 1)
    coords: coordinates tensor with shape (bs*h*w, 2r+1, 2r+1, 2), xy-indexing
    mask: not implemented (same as the original implementation)
  Returns:
    sampled tensor with shape (bs*h*w, 2r+1, 2r+1, 1)
  '''
  # NCHW -> NHWC
  image = tf.transpose(image, perm=[0, 2, 3, 1])
  _, h, w, _ = image.get_shape().as_list()
  # -> (bs*h*w, 2r+1, 2r+1) x 2
  gx, gy = tf.unstack(coords, axis=-1)
  # Unnormalize the grid from [-1, 1] to pixel coordinates.
  if align_corners:
    # align_corners=True: map [-1, 1] to [0, size - 1]
    gx = tf.math.multiply(tf.math.divide(tf.math.add(gx, 1), 2), w - 1)
    gy = tf.math.multiply(tf.math.divide(tf.math.add(gy, 1), 2), h - 1)
  else:
    # align_corners=False: map [-1, 1] to [-0.5, size - 0.5]
    gx = tf.math.divide(tf.math.subtract(tf.math.multiply(tf.math.add(gx, 1), w), 1), 2)
    gy = tf.math.divide(tf.math.subtract(tf.math.multiply(tf.math.add(gy, 1), h), 1), 2)
  gx = tf.clip_by_value(gx, 0, w - 1)
  gy = tf.clip_by_value(gy, 0, h - 1)
  # corners: (bs*h*w, 2r+1, 2r+1) x 4
  gx0 = tf.floor(gx)
  gx1 = tf.math.ceil(gx)
  gy0 = tf.floor(gy)
  gy1 = tf.math.ceil(gy)
  # corner coordinates: (bs*h*w, 2r+1, 2r+1, 2) x 4
  g00 = tf.stack([gy0, gx0], axis=-1)
  g01 = tf.stack([gy0, gx1], axis=-1)
  g10 = tf.stack([gy1, gx0], axis=-1)
  g11 = tf.stack([gy1, gx1], axis=-1)
  # bilinear coefficients: (bs*h*w, 2r+1, 2r+1, 1) x 4
  c00 = tf.expand_dims((gy1 - gy) * (gx1 - gx), axis=-1)
  c01 = tf.expand_dims((gy1 - gy) * (gx - gx0), axis=-1)
  c10 = tf.expand_dims((gy - gy0) * (gx1 - gx), axis=-1)
  c11 = tf.expand_dims((gy - gy0) * (gx - gx0), axis=-1)
  # gathered corner values: (bs*h*w, 2r+1, 2r+1, 1) x 4
  x00 = tf.gather_nd(image, tf.cast(g00, dtype=tf.int32), batch_dims=1)
  x01 = tf.gather_nd(image, tf.cast(g01, dtype=tf.int32), batch_dims=1)
  x10 = tf.gather_nd(image, tf.cast(g10, dtype=tf.int32), batch_dims=1)
  x11 = tf.gather_nd(image, tf.cast(g11, dtype=tf.int32), batch_dims=1)
  output = c00 * x00 + c01 * x01 + c10 * x10 + c11 * x11
  # NHWC -> NCHW
  output = tf.transpose(output, perm=[0, 3, 1, 2])
  return output


@onnx_op("GridSample")
class GridSample(BackendHandler):

  @classmethod
  def version_16(cls, node, **kwargs):
    return [cls.make_tensor_from_onnx_node(node, **kwargs)]

  @classmethod
  def version_20(cls, node, **kwargs):
    return [cls.make_tensor_from_onnx_node(node, **kwargs)]

  @classmethod
  def make_tensor_from_onnx_node(cls,
                                 node,
                                 tf_func=None,
                                 inputs=None,
                                 attrs=None,
                                 name="",
                                 c_first_cuda_only=False,
                                 c_last_only=False,
                                 **kwargs):
    tensor_dict = kwargs.get("tensor_dict", {})
    if inputs is None:
      inputs = [tensor_dict.get(inp, None) for inp in node.inputs]
    if attrs is None:
      attrs = copy.deepcopy(node.attrs)
    name = name or node.name
    if name != "":
      attrs["name"] = name
    # inputs are (image, grid); only bilinear sampling is handled here
    return grid_sample(inputs[0], inputs[1],
                       align_corners=attrs.get("align_corners", 0) == 1)
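Step 3: re-run the onnx-tf conversion as usual; GridSample nodes are now dispatched to this handler. A minimal sketch of the conversion call (file names are examples):
import onnx
from onnx_tf.backend import prepare

onnx_model = onnx.load("grid_sample.onnx")   # the model exported with opset 16
tf_rep = prepare(onnx_model)                 # GridSample nodes hit the custom handler
tf_rep.export_graph("grid_sample_pb")        # writes a TensorFlow SavedModel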