
Operand #1 does not dominate this use


What happened?

For the given IR, compilation fails with a dominance verification error:

module {
  func.func @main_graph(%arg0: !torch.vtensor<[1,3,224,224],f32>, %arg1: !torch.vtensor<[1,18,56,56],f32> , %arg2:!torch.vtensor<[1,18,56,56],f32> , %arg3: !torch.vtensor<[1,36,28,28],f32>,%arg5: !torch.vtensor<[1,18,56,56],f32> , %arg6: !torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,18,28,28],f32>  attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.opset_versions = {ai.onnx.contrib = 1 : si64, ai.onnx.ml = 4 : si64, ai.onnx.preview.training = 1 : si64, ai.onnx.training = 1 : si64, com.microsoft = 1 : si64, com.microsoft.experimental = 1 : si64, com.microsoft.nchwc = 1 : si64, org.pytorch.aten = 1 : si64}, torch.onnx_meta.producer_name = "vai_q_onnx", torch.onnx_meta.producer_version = "1.17.0+43059a7"} {
    %0 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__stage2_stage2.0_fuse_layers.0.1_fuse_layers.0.1.2_Constant_output_0> : tensor<4xf32>} : () -> !torch.vtensor<[4],f32> 
    %165 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %166 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %167 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %168 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %169 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %170 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %171 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %172 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %173 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1718_quantized> : tensor<36x18x3x3xsi8>} : () -> !torch.vtensor<[36,18,3,3],si8> 
    %174 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %175 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %176 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1719_quantized> : tensor<36xsi8>} : () -> !torch.vtensor<[36],si8> 
    %177 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %178 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.125000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %179 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0.001953125> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %180 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %181 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1715_quantized> : tensor<18x36x1x1xsi8>} : () -> !torch.vtensor<[18,36,1,1],si8> 
    %182 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.125000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %183 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %184 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1716_quantized> : tensor<18xsi8>} : () -> !torch.vtensor<[18],si8> 
    %185 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %186 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %187 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<9.765625E-4> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %188 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %189 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1721_quantized> : tensor<72x36x3x3xsi8>} : () -> !torch.vtensor<[72,36,3,3],si8> 
    %190 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<6.250000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %191 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %192 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1722_quantized> : tensor<72xsi8>} : () -> !torch.vtensor<[72],si8> 
    %193 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %194 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %195 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1736_quantized> : tensor<36x36x3x3xsi8>} : () -> !torch.vtensor<[36,36,3,3],si8> 
    %196 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<6.250000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %197 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %198 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1737_quantized> : tensor<36xsi8>} : () -> !torch.vtensor<[36],si8> 
    %199 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %200 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %201 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %202 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<6.250000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %203 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %204 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %205 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %206 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %207 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1724_quantized> : tensor<18x18x3x3xsi8>} : () -> !torch.vtensor<[18,18,3,3],si8> 
    %208 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.562500e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %209 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %210 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1725_quantized> : tensor<18xsi8>} : () -> !torch.vtensor<[18],si8> 
    %211 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %212 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %213 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1748_quantized> : tensor<72x72x3x3xsi8>} : () -> !torch.vtensor<[72,72,3,3],si8> 
    %225 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %226 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %227 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %228 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<6.250000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %229 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %230 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %231 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<7.812500e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %232 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %233 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1727_quantized> : tensor<18x18x3x3xsi8>} : () -> !torch.vtensor<[18,18,3,3],si8> 
    %234 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.125000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %235 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %236 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1728_quantized> : tensor<18xsi8>} : () -> !torch.vtensor<[18],si8> 
    %245 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %246 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %253 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %254 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %299 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %300 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %301 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %302 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<6.250000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %303 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %304 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %305 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1760_quantized> : tensor<18x36x1x1xsi8>} : () -> !torch.vtensor<[18,36,1,1],si8> 
    %306 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.125000e-02> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %307 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %308 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1761_quantized> : tensor<18xsi8>} : () -> !torch.vtensor<[18],si8> 
    %317 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %318 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %351 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %352 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %367 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %368 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %421 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %422 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %475 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %476 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<2.500000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %487 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<3.906250e-03> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %488 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %489 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1829_quantized> : tensor<18x18x3x3xsi8>} : () -> !torch.vtensor<[18,18,3,3],si8> 
    %490 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1.250000e-01> : tensor<f32>} : () -> !torch.vtensor<[],f32> 
    %491 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si8>} : () -> !torch.vtensor<[],si8> 
    %492 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Conv_1830_quantized> : tensor<18xsi8>} : () -> !torch.vtensor<[18],si8> 
    %none = torch.constant.none
    %1553 = torch.operator "onnx.DequantizeLinear"(%181, %179, %180) : (!torch.vtensor<[18,36,1,1],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18,36,1,1],f32> 
    %1554 = torch.operator "onnx.DequantizeLinear"(%184, %182, %183) : (!torch.vtensor<[18],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18],f32> 
    %1555 = torch.operator "onnx.DequantizeLinear"(%173, %171, %172) : (!torch.vtensor<[36,18,3,3],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[36,18,3,3],f32> 
    %1556 = torch.operator "onnx.DequantizeLinear"(%176, %174, %175) : (!torch.vtensor<[36],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[36],f32> 
    %1557 = torch.operator "onnx.DequantizeLinear"(%189, %187, %188) : (!torch.vtensor<[72,36,3,3],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[72,36,3,3],f32> 
    %1558 = torch.operator "onnx.DequantizeLinear"(%192, %190, %191) : (!torch.vtensor<[72],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[72],f32> 
    %1559 = torch.operator "onnx.DequantizeLinear"(%207, %205, %206) : (!torch.vtensor<[18,18,3,3],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18,18,3,3],f32> 
    %1560 = torch.operator "onnx.DequantizeLinear"(%210, %208, %209) : (!torch.vtensor<[18],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18],f32> 
    %1561 = torch.operator "onnx.DequantizeLinear"(%233, %231, %232) : (!torch.vtensor<[18,18,3,3],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18,18,3,3],f32> 
    %1562 = torch.operator "onnx.DequantizeLinear"(%236, %234, %235) : (!torch.vtensor<[18],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18],f32> 
    %1583 = torch.operator "onnx.DequantizeLinear"(%305, %303, %304) : (!torch.vtensor<[18,36,1,1],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18,36,1,1],f32> 
    %1584 = torch.operator "onnx.DequantizeLinear"(%308, %306, %307) : (!torch.vtensor<[18],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18],f32> 
    %1629 = torch.operator "onnx.DequantizeLinear"(%489, %487, %488) : (!torch.vtensor<[18,18,3,3],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18,18,3,3],f32> 
    %1630 = torch.operator "onnx.DequantizeLinear"(%492, %490, %491) : (!torch.vtensor<[18],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[18],f32> 
    %1931 = torch.operator "onnx.Relu"(%arg5) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1932 = torch.operator "onnx.Relu"(%arg6) : (!torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1933 = torch.operator "onnx.QuantizeLinear"(%1931, %166, %165) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1934 = torch.operator "onnx.QuantizeLinear"(%1932, %168, %167) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],si8> 
    %1935 = torch.operator "onnx.DequantizeLinear"(%1933, %166, %165) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1936 = torch.operator "onnx.DequantizeLinear"(%1934, %168, %167) : (!torch.vtensor<[1,36,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1937 = torch.operator "onnx.Conv"(%1935, %1555, %1556) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[36,18,3,3],f32>, !torch.vtensor<[36],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1938 = torch.operator "onnx.Conv"(%1936, %1553, %1554) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [1 : si64, 1 : si64], torch.onnx.pads = [0 : si64, 0 : si64, 0 : si64, 0 : si64], torch.onnx.strides = [1 : si64, 1 : si64]} : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[18,36,1,1],f32>, !torch.vtensor<[18],f32>) -> !torch.vtensor<[1,18,28,28],f32> 
    %1939 = torch.operator "onnx.QuantizeLinear"(%1937, %170, %169) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],si8> 
    %1940 = torch.operator "onnx.QuantizeLinear"(%1938, %178, %177) : (!torch.vtensor<[1,18,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,28,28],si8> 
    %1941 = torch.operator "onnx.DequantizeLinear"(%1939, %170, %169) : (!torch.vtensor<[1,36,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1942 = torch.operator "onnx.DequantizeLinear"(%1940, %178, %177) : (!torch.vtensor<[1,18,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,28,28],f32> 
    %1943 = torch.operator "onnx.Add"(%1941, %arg3) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1944 = torch.operator "onnx.Resize"(%1942, %none, %0) {torch.onnx.coordinate_transformation_mode = "asymmetric", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "nearest", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,18,28,28],f32>, !torch.none, !torch.vtensor<[4],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1945 = torch.operator "onnx.Relu"(%1943) : (!torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1946 = torch.operator "onnx.QuantizeLinear"(%1944, %178, %177) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1947 = torch.operator "onnx.QuantizeLinear"(%1945, %186, %185) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],si8> 
    %1948 = torch.operator "onnx.DequantizeLinear"(%1946, %178, %177) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1949 = torch.operator "onnx.DequantizeLinear"(%1947, %186, %185) : (!torch.vtensor<[1,36,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1950 = torch.operator "onnx.Add"(%1935, %1948) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1953 = torch.operator "onnx.Relu"(%1950) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1956 = torch.operator "onnx.QuantizeLinear"(%1953, %200, %199) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1959 = torch.operator "onnx.DequantizeLinear"(%1956, %200, %199) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1962 = torch.operator "onnx.Conv"(%1959, %1559, %1560) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64], torch.onnx.strides = [1 : si64, 1 : si64]} : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[18,18,3,3],f32>, !torch.vtensor<[18],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1965 = torch.operator "onnx.Relu"(%1962) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1968 = torch.operator "onnx.QuantizeLinear"(%1965, %226, %225) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1971 = torch.operator "onnx.DequantizeLinear"(%1968, %226, %225) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1973 = torch.operator "onnx.Add"(%arg3, %1949) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1974 = torch.operator "onnx.Conv"(%1971, %1561, %1562) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64], torch.onnx.strides = [1 : si64, 1 : si64]} : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[18,18,3,3],f32>, !torch.vtensor<[18],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1976 = torch.operator "onnx.Relu"(%1973) : (!torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1977 = torch.operator "onnx.QuantizeLinear"(%1974, %230, %229) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1979 = torch.operator "onnx.QuantizeLinear"(%1976, %246, %245) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],si8> 
    %1980 = torch.operator "onnx.DequantizeLinear"(%1977, %230, %229) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1982 = torch.operator "onnx.DequantizeLinear"(%1979, %246, %245) : (!torch.vtensor<[1,36,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],f32> 
    %1983 = torch.operator "onnx.Add"(%1980, %arg2) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1986 = torch.operator "onnx.Relu"(%1983) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %1989 = torch.operator "onnx.QuantizeLinear"(%1986, %254, %253) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %1992 = torch.operator "onnx.DequantizeLinear"(%1989, %254, %253) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2006 = torch.operator "onnx.Add"(%arg3, %1982) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %2009 = torch.operator "onnx.Relu"(%2006) : (!torch.vtensor<[1,36,28,28],f32>) -> !torch.vtensor<[1,36,28,28],f32> 
    %2012 = torch.operator "onnx.QuantizeLinear"(%2009, %300, %299) : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],si8> 
    %2015 = torch.operator "onnx.DequantizeLinear"(%2012, %300, %299) : (!torch.vtensor<[1,36,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,36,28,28],f32> 
    %2016 = torch.operator "onnx.Add"(%arg1, %1992) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2018 = torch.operator "onnx.Conv"(%2015, %1583, %1584) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [1 : si64, 1 : si64], torch.onnx.pads = [0 : si64, 0 : si64, 0 : si64, 0 : si64], torch.onnx.strides = [1 : si64, 1 : si64]} : (!torch.vtensor<[1,36,28,28],f32>, !torch.vtensor<[18,36,1,1],f32>, !torch.vtensor<[18],f32>) -> !torch.vtensor<[1,18,28,28],f32> 
    %2020 = torch.operator "onnx.Relu"(%2016) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2022 = torch.operator "onnx.QuantizeLinear"(%2018, %302, %301) : (!torch.vtensor<[1,18,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,28,28],si8> 
    %2024 = torch.operator "onnx.QuantizeLinear"(%2020, %318, %317) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2026 = torch.operator "onnx.DequantizeLinear"(%2022, %302, %301) : (!torch.vtensor<[1,18,28,28],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,28,28],f32> 
    %2028 = torch.operator "onnx.DequantizeLinear"(%2024, %318, %317) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2030 = torch.operator "onnx.Resize"(%2026, %none, %0) {torch.onnx.coordinate_transformation_mode = "asymmetric", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "nearest", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,18,28,28],f32>, !torch.none, !torch.vtensor<[4],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2035 = torch.operator "onnx.QuantizeLinear"(%2030, %302, %301) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2040 = torch.operator "onnx.DequantizeLinear"(%2035, %302, %301) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2045 = torch.operator "onnx.Add"(%2028, %2040) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2050 = torch.operator "onnx.QuantizeLinear"(%2045, %352, %351) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2055 = torch.operator "onnx.DequantizeLinear"(%2050, %352, %351) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2061 = torch.operator "onnx.Add"(%2055, %arg1) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2064 = torch.operator "onnx.Relu"(%2061) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2067 = torch.operator "onnx.QuantizeLinear"(%2064, %368, %367) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2070 = torch.operator "onnx.DequantizeLinear"(%2067, %368, %367) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2094 = torch.operator "onnx.Add"(%arg1, %2070) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2097 = torch.operator "onnx.Relu"(%2094) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2100 = torch.operator "onnx.QuantizeLinear"(%2097, %422, %421) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2103 = torch.operator "onnx.DequantizeLinear"(%2100, %422, %421) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2127 = torch.operator "onnx.Add"(%arg1, %2103) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2130 = torch.operator "onnx.Relu"(%2127) : (!torch.vtensor<[1,18,56,56],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2133 = torch.operator "onnx.QuantizeLinear"(%2130, %476, %475) : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],si8> 
    %2136 = torch.operator "onnx.DequantizeLinear"(%2133, %476, %475) : (!torch.vtensor<[1,18,56,56],si8>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[1,18,56,56],f32> 
    %2140 = torch.operator "onnx.Conv"(%2136, %1629, %1630) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,18,56,56],f32>, !torch.vtensor<[18,18,3,3],f32>, !torch.vtensor<[18],f32>) -> !torch.vtensor<[1,18,28,28],f32> 
    return %2140 : !torch.vtensor<[1,18,28,28],f32>
  }
}
model.torch_onnx.mlir:123:13: error: operand #0 does not dominate this use
    %1944 = torch.operator "onnx.Resize"(%1942, %none, %0) {torch.onnx.coordinate_transformation_mode = "asymmetric", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "nearest", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,18,28,28],f32>, !torch.none, !torch.vtensor<[4],f32>) -> !torch.vtensor<[1,18,56,56],f32> 
            ^
model.torch_onnx.mlir:123:13: note: see current operation: %325 = "tensor.extract"(%47, %8, %310, %318, %324) : (tensor<1x18x28x28xf32>, index, index, index, index) -> f32
model.torch_onnx.mlir:121:13: note: operand defined here (op in a parent region)
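
For reference, this class of error comes from the MLIR verifier: every SSA operand must be defined at a point that dominates its use. The notes above suggest that, while lowering the onnx.Resize at line 123, a generated tensor.extract ends up reading a value defined in a parent region from a position that definition no longer dominates. A minimal, hypothetical MLIR sketch (unrelated to this model) that fails verification with the same message:

func.func @dominance_violation(%cond: i1) -> f32 {
  %r = scf.if %cond -> (f32) {
    // %v is defined later in the enclosing block, so its definition does not
    // dominate this use; the verifier reports
    // "operand #0 does not dominate this use".
    scf.yield %v : f32
  } else {
    %c0 = arith.constant 0.0 : f32
    scf.yield %c0 : f32
  }
  %v = arith.constant 1.0 : f32
  return %r : f32
}

The input IR above is itself well-formed, so the dominance violation is presumably introduced by one of IREE's lowering passes partway through the pipeline rather than being present in the source.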

Steps to reproduce your issue

Command:

 iree-compile model.torch_onnx.mlir --iree-hal-target-backends=llvm-cpu -o compiled_model.vmfb --iree-llvmcpu-target-cpu=host

What component(s) does this issue relate to?

Compiler

Version information

No response

Additional context

No response

pdhirajkumarprasad · Oct 21 '24