ValueTracking: Handle amdgcn_exp2 in computeKnownFPClass
The base exp handling looks pretty incomplete.
-
#172496
-
#172495
👈 (View in Graphite)
-
#172494
-
main
This stack of pull requests is managed by Graphite. Learn more about stacking.
@llvm/pr-subscribers-llvm-analysis @llvm/pr-subscribers-llvm-transforms
@llvm/pr-subscribers-backend-amdgpu
Author: Matt Arsenault (arsenm)
Changes
The base exp handling looks pretty incomplete.
Full diff: https://github.com/llvm/llvm-project/pull/172495.diff
2 Files Affected:
- (modified) llvm/lib/Analysis/ValueTracking.cpp (+6-1)
- (modified) llvm/test/Transforms/Attributor/AMDGPU/nofpclass-amdgcn-exp.ll (+24-24)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 045cbab221ac3..0b57e27c92331 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -5374,7 +5374,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
}
case Intrinsic::exp:
case Intrinsic::exp2:
- case Intrinsic::exp10: {
+ case Intrinsic::exp10:
+ case Intrinsic::amdgcn_exp2: {
Known.knownNot(fcNegative);
if ((InterestedClasses & fcNan) == fcNone)
break;
@@ -5387,6 +5388,10 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
Known.signBitMustBeZero();
}
+ Type *EltTy = II->getType()->getScalarType();
+ if (IID == Intrinsic::amdgcn_exp2 && EltTy->isFloatTy())
+ Known.knownNot(fcSubnormal);
+
break;
}
case Intrinsic::fptrunc_round: {
diff --git a/llvm/test/Transforms/Attributor/AMDGPU/nofpclass-amdgcn-exp.ll b/llvm/test/Transforms/Attributor/AMDGPU/nofpclass-amdgcn-exp.ll
index babd1c68f161d..e7c41e9be7e33 100644
--- a/llvm/test/Transforms/Attributor/AMDGPU/nofpclass-amdgcn-exp.ll
+++ b/llvm/test/Transforms/Attributor/AMDGPU/nofpclass-amdgcn-exp.ll
@@ -4,9 +4,9 @@
declare float @llvm.amdgcn.exp2.f32(float)
define half @ret_exp_f16(half %arg0) {
-; CHECK-LABEL: define half @ret_exp_f16(
+; CHECK-LABEL: define nofpclass(ninf nzero nsub nnorm) half @ret_exp_f16(
; CHECK-SAME: half [[ARG0:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[CALL:%.*]] = call half @llvm.amdgcn.exp2.f16(half [[ARG0]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero nsub nnorm) half @llvm.amdgcn.exp2.f16(half [[ARG0]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: ret half [[CALL]]
;
%call = call half @llvm.amdgcn.exp2.f16(half %arg0)
@@ -14,9 +14,9 @@ define half @ret_exp_f16(half %arg0) {
}
define float @ret_exp_f32(float %arg0) {
-; CHECK-LABEL: define float @ret_exp_f32(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_f32(
; CHECK-SAME: float [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -24,9 +24,9 @@ define float @ret_exp_f32(float %arg0) {
}
define float @ret_exp_noinf(float nofpclass(inf) %arg0) {
-; CHECK-LABEL: define float @ret_exp_noinf(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_noinf(
; CHECK-SAME: float nofpclass(inf) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(inf) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(inf) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -34,9 +34,9 @@ define float @ret_exp_noinf(float nofpclass(inf) %arg0) {
}
define float @ret_exp_nopinf(float nofpclass(pinf) %arg0) {
-; CHECK-LABEL: define float @ret_exp_nopinf(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_nopinf(
; CHECK-SAME: float nofpclass(pinf) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(pinf) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(pinf) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -44,9 +44,9 @@ define float @ret_exp_nopinf(float nofpclass(pinf) %arg0) {
}
define float @ret_exp_noninf(float nofpclass(ninf) %arg0) {
-; CHECK-LABEL: define float @ret_exp_noninf(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_noninf(
; CHECK-SAME: float nofpclass(ninf) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(ninf) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(ninf) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -54,9 +54,9 @@ define float @ret_exp_noninf(float nofpclass(ninf) %arg0) {
}
define float @ret_exp_nonan(float nofpclass(nan) %arg0) {
-; CHECK-LABEL: define float @ret_exp_nonan(
+; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @ret_exp_nonan(
; CHECK-SAME: float nofpclass(nan) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(nan) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(nan) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -64,9 +64,9 @@ define float @ret_exp_nonan(float nofpclass(nan) %arg0) {
}
define float @ret_exp_nonan_noinf(float nofpclass(nan inf) %arg0) {
-; CHECK-LABEL: define float @ret_exp_nonan_noinf(
+; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @ret_exp_nonan_noinf(
; CHECK-SAME: float nofpclass(nan inf) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(nan inf) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(nan inf) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -74,9 +74,9 @@ define float @ret_exp_nonan_noinf(float nofpclass(nan inf) %arg0) {
}
define float @ret_exp_nonan_noinf_nozero(float nofpclass(nan inf zero) %arg0) {
-; CHECK-LABEL: define float @ret_exp_nonan_noinf_nozero(
+; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @ret_exp_nonan_noinf_nozero(
; CHECK-SAME: float nofpclass(nan inf zero) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(nan inf zero) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(nan inf zero) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -84,9 +84,9 @@ define float @ret_exp_nonan_noinf_nozero(float nofpclass(nan inf zero) %arg0) {
}
define float @ret_exp_noinf_nozero(float nofpclass(inf zero) %arg0) {
-; CHECK-LABEL: define float @ret_exp_noinf_nozero(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_noinf_nozero(
; CHECK-SAME: float nofpclass(inf zero) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(inf zero) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(inf zero) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -94,9 +94,9 @@ define float @ret_exp_noinf_nozero(float nofpclass(inf zero) %arg0) {
}
define float @ret_exp_noinf_nonegzero(float nofpclass(inf nzero) %arg0) {
-; CHECK-LABEL: define float @ret_exp_noinf_nonegzero(
+; CHECK-LABEL: define nofpclass(ninf nzero sub nnorm) float @ret_exp_noinf_nonegzero(
; CHECK-SAME: float nofpclass(inf nzero) [[ARG0:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float nofpclass(inf nzero) [[ARG0]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float nofpclass(inf nzero) [[ARG0]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = call float @llvm.amdgcn.exp2.f32(float %arg0)
@@ -104,10 +104,10 @@ define float @ret_exp_noinf_nonegzero(float nofpclass(inf nzero) %arg0) {
}
define float @ret_exp_positive_source(i32 %arg) {
-; CHECK-LABEL: define float @ret_exp_positive_source(
+; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @ret_exp_positive_source(
; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[UITOFP:%.*]] = uitofp i32 [[ARG]] to float
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float [[UITOFP]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float [[UITOFP]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%uitofp = uitofp i32 %arg to float
@@ -117,10 +117,10 @@ define float @ret_exp_positive_source(i32 %arg) {
; Could produce a nan because we don't know if the multiply is negative.
define float @ret_exp_unknown_sign(float nofpclass(nan) %arg0, float nofpclass(nan) %arg1) {
-; CHECK-LABEL: define float @ret_exp_unknown_sign(
+; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @ret_exp_unknown_sign(
; CHECK-SAME: float nofpclass(nan) [[ARG0:%.*]], float nofpclass(nan) [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[UNKNOWN_SIGN_NOT_NAN:%.*]] = fmul nnan float [[ARG0]], [[ARG1]]
-; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.amdgcn.exp2.f32(float [[UNKNOWN_SIGN_NOT_NAN]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.amdgcn.exp2.f32(float [[UNKNOWN_SIGN_NOT_NAN]]) #[[ATTR2]]
; CHECK-NEXT: ret float [[CALL]]
;
%unknown.sign.not.nan = fmul nnan float %arg0, %arg1
:penguin: Linux x64 Test Results
- 167260 tests passed
- 2959 tests skipped
- 1 test failed
Failed Tests
(click on a test name to see its output)
MLIR
MLIR.Dialect/XeGPU/propagate-layout-subgroup.mlir (Likely Already Failing)
This test is already failing at the base commit.
Exit Code: 1
Command Output (stdout):
--
# RUN: at line 1
/home/gha/actions-runner/_work/llvm-project/llvm-project/build/bin/mlir-opt -xevm-attach-target='chip=pvc' -xegpu-propagate-layout="layout-kind=subgroup" -split-input-file /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir | /home/gha/actions-runner/_work/llvm-project/llvm-project/build/bin/FileCheck /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir
# executed command: /home/gha/actions-runner/_work/llvm-project/llvm-project/build/bin/mlir-opt -xevm-attach-target=chip=pvc -xegpu-propagate-layout=layout-kind=subgroup -split-input-file /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir
# note: command had no output on stdout or stderr
# executed command: /home/gha/actions-runner/_work/llvm-project/llvm-project/build/bin/FileCheck /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir
# .---command stderr------------
# | /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir:10:17: error: CHECK-SAME: expected string not found in input
# | // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}
# | ^
# | <stdin>:5:90: note: scanning from here
# | %1 = xegpu.load_nd %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>> -> vector<256x128xf32>
# | ^
# | <stdin>:6:17: note: possible intended match here
# | xegpu.store_nd %1, %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : vector<256x128xf32>, !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | ^
# | /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir:36:17: error: CHECK-SAME: expected string not found in input
# | // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>} :
# | ^
# | <stdin>:18:112: note: scanning from here
# | %2 = xegpu.load_nd %0[0, 0] <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>> -> vector<256x128xf32>
# | ^
# | <stdin>:19:35: note: possible intended match here
# | %3 = vector.transpose %2, [1, 0] {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>} : vector<256x128xf32> to vector<128x256xf32>
# | ^
# |
# | Input file: <stdin>
# | Check file: /home/gha/actions-runner/_work/llvm-project/llvm-project/mlir/test/Dialect/XeGPU/propagate-layout-subgroup.mlir
# |
# | -dump-input=help explains the following input dump.
# |
# | Input was:
# | <<<<<<
# | 1: module {
# | 2: gpu.module @test [#xevm.target<chip = "pvc">] {
# | 3: func.func @store_nd(%arg0: memref<256x128xf32>) {
# | 4: %0 = xegpu.create_nd_tdesc %arg0 : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | 5: %1 = xegpu.load_nd %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>> -> vector<256x128xf32>
# | same:10'0 X~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error: no match found
# | 6: xegpu.store_nd %1, %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : vector<256x128xf32>, !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | same:10'1 ? possible intended match
# | 7: return
# | same:10'0 ~~~~~~~~
# | 8: }
# | same:10'0 ~~~
# | 9: }
# | same:10'0 ~~~
# | 10: }
# | same:10'0 ~~
# | 11:
# | same:10'0 ~
# | 12: // -----
# | same:10'0 ~~~~~~~~~
# | 13: module {
# | same:10'0 ~~~~~~~~~
# | 14: gpu.module @test [#xevm.target<chip = "pvc">] {
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 15: func.func @vector_transpose(%arg0: memref<256x128xf32>, %arg1: memref<128x256xf32>) {
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 16: %0 = xegpu.create_nd_tdesc %arg0 : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>>
# | 17: %1 = xegpu.create_nd_tdesc %arg1 : memref<128x256xf32> -> !xegpu.tensor_desc<128x256xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>>
# | 18: %2 = xegpu.load_nd %0[0, 0] <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>> -> vector<256x128xf32>
# | same:36'0 X~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error: no match found
# | 19: %3 = vector.transpose %2, [1, 0] {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>} : vector<256x128xf32> to vector<128x256xf32>
# | same:36'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | same:36'1 ? possible intended match
# | 20: xegpu.store_nd %3, %1[0, 0] <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>}> : vector<128x256xf32>, !xegpu.tensor_desc<128x256xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>>
# | same:36'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 21: return
# | same:36'0 ~~~~~~~~
# | 22: }
# | same:36'0 ~~~
# | 23: }
# | same:36'0 ~~~
# | 24: }
# | same:36'0 ~~
# | 25:
# | same:36'0 ~
# | >>>>>>
# `-----------------------------
# error: command failed with exit status: 1
--
If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the infrastructure label.
:window: Windows x64 Test Results
- 128746 tests passed
- 2826 tests skipped
- 1 test failed
Failed Tests
(click on a test name to see its output)
MLIR
MLIR.Dialect/XeGPU/propagate-layout-subgroup.mlir
Exit Code: 1
Command Output (stdout):
--
# RUN: at line 1
c:\_work\llvm-project\llvm-project\build\bin\mlir-opt.exe -xevm-attach-target='chip=pvc' -xegpu-propagate-layout="layout-kind=subgroup" -split-input-file C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir | c:\_work\llvm-project\llvm-project\build\bin\filecheck.exe C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir
# executed command: 'c:\_work\llvm-project\llvm-project\build\bin\mlir-opt.exe' -xevm-attach-target=chip=pvc -xegpu-propagate-layout=layout-kind=subgroup -split-input-file 'C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir'
# note: command had no output on stdout or stderr
# executed command: 'c:\_work\llvm-project\llvm-project\build\bin\filecheck.exe' 'C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir'
# .---command stderr------------
# | C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir:10:17: error: CHECK-SAME: expected string not found in input
# | // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}
# | ^
# | <stdin>:5:90: note: scanning from here
# | %1 = xegpu.load_nd %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>> -> vector<256x128xf32>
# | ^
# | <stdin>:6:17: note: possible intended match here
# | xegpu.store_nd %1, %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : vector<256x128xf32>, !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | ^
# | C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir:36:17: error: CHECK-SAME: expected string not found in input
# | // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>} :
# | ^
# | <stdin>:18:112: note: scanning from here
# | %2 = xegpu.load_nd %0[0, 0] <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>> -> vector<256x128xf32>
# | ^
# | <stdin>:19:35: note: possible intended match here
# | %3 = vector.transpose %2, [1, 0] {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>} : vector<256x128xf32> to vector<128x256xf32>
# | ^
# |
# | Input file: <stdin>
# | Check file: C:\_work\llvm-project\llvm-project\mlir\test\Dialect\XeGPU\propagate-layout-subgroup.mlir
# |
# | -dump-input=help explains the following input dump.
# |
# | Input was:
# | <<<<<<
# | 1: module {
# | 2: gpu.module @test [#xevm.target<chip = "pvc">] {
# | 3: func.func @store_nd(%arg0: memref<256x128xf32>) {
# | 4: %0 = xegpu.create_nd_tdesc %arg0 : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | 5: %1 = xegpu.load_nd %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>> -> vector<256x128xf32>
# | same:10'0 X~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error: no match found
# | 6: xegpu.store_nd %1, %0 <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>}> : vector<256x128xf32>, !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>>
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | same:10'1 ? possible intended match
# | 7: return
# | same:10'0 ~~~~~~~~
# | 8: }
# | same:10'0 ~~~
# | 9: }
# | same:10'0 ~~~
# | 10: }
# | same:10'0 ~~
# | 11:
# | same:10'0 ~
# | 12: // -----
# | same:10'0 ~~~~~~~~~
# | 13: module {
# | same:10'0 ~~~~~~~~~
# | 14: gpu.module @test [#xevm.target<chip = "pvc">] {
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 15: func.func @vector_transpose(%arg0: memref<256x128xf32>, %arg1: memref<128x256xf32>) {
# | same:10'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 16: %0 = xegpu.create_nd_tdesc %arg0 : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>>
# | 17: %1 = xegpu.create_nd_tdesc %arg1 : memref<128x256xf32> -> !xegpu.tensor_desc<128x256xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>>
# | 18: %2 = xegpu.load_nd %0[0, 0] <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>}> : !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [4, 8], sg_data = [64, 32], order = [0, 1]>> -> vector<256x128xf32>
# | same:36'0 X~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error: no match found
# | 19: %3 = vector.transpose %2, [1, 0] {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>} : vector<256x128xf32> to vector<128x256xf32>
# | same:36'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | same:36'1 ? possible intended match
# | 20: xegpu.store_nd %3, %1[0, 0] <{layout = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>}> : vector<128x256xf32>, !xegpu.tensor_desc<128x256xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], order = [1, 0]>>
# | same:36'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# | 21: return
# | same:36'0 ~~~~~~~~
# | 22: }
# | same:36'0 ~~~
# | 23: }
# | same:36'0 ~~~
# | 24: }
# | same:36'0 ~~
# | 25:
# | same:36'0 ~
# | >>>>>>
# `-----------------------------
# error: command failed with exit status: 1
--
If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the infrastructure label.