From 4e47287892d9f45ee5ef5dff5a8270011673baf5 Mon Sep 17 00:00:00 2001 From: Stanley Winata Date: Tue, 26 Nov 2024 23:13:44 -0800 Subject: [PATCH] [mlperf][pkgci] Update punet-fp8 with reduction dim as last dim We have changes in sharktank that convert the reduction dim of the custom attention to be the fastest dimension. This makes it more uniform with the FP16 and canonical attention forms, and hopefully makes the optimization get picked up more easily down the line. Additionally, this change lands ahead of time so that we do not break the upcoming sharktank/mlperf bots and runs. Signed-off-by: Stanley Winata --- .../external_test_suite/attention_and_matmul_spec_punet.mlir | 2 +- .../regression_suite/shark-test-suite-models/sdxl/test_unet.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir b/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir index 7b0944471990..16049fad2543 100644 --- a/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir +++ b/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir @@ -76,7 +76,7 @@ transform.named_sequence @match_attention_f8(%attention: !transform.any_op {tran transform.iree.match.cast_compatible_type %in0 = tensor : !transform.any_value %config = transform.param.constant #iree_codegen.compilation_info< - lowering_config = #iree_gpu.lowering_config<{workgroup = [1, 1, 64, 0, 0, 0], reduction=[0, 0, 0, 0, 64, 0], promote_operands = [1, 2]}>, + lowering_config = #iree_gpu.lowering_config<{workgroup = [1, 1, 64, 0, 0, 0], reduction=[0, 0, 0, 0, 0, 64], promote_operands = [1, 2]}>, translation_info = #iree_codegen.translation_info