diff --git a/llvm/test/CodeGen/X86/vpdpwssd.ll b/llvm/test/CodeGen/X86/vpdpwssd.ll
index 3c1eb92e9e3c3f8..c2c59e6be879774 100644
--- a/llvm/test/CodeGen/X86/vpdpwssd.ll
+++ b/llvm/test/CodeGen/X86/vpdpwssd.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+fast-dpwssd | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,ZNVER
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,ZNVER
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+fast-dpwssd | FileCheck %s --check-prefixes=CHECK,AVX512-VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl,+fast-dpwssd | FileCheck %s --check-prefixes=CHECK,AVX512VL-VNNI
 
 define <16 x i32> @vpdpwssd_test(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2) {
 ; CHECK-LABEL: vpdpwssd_test:
@@ -11,3 +12,165 @@ define <16 x i32> @vpdpwssd_test(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2) {
   %4 = tail call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
   ret <16 x i32> %4
 }
+
+define <16 x i32> @vpdpwssd_v16i32_accumulate(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> %a2) {
+; ZNVER-LABEL: vpdpwssd_v16i32_accumulate:
+; ZNVER:       # %bb.0:
+; ZNVER-NEXT:    vpmovsxwd %ymm0, %zmm3
+; ZNVER-NEXT:    vpmovsxwd %ymm1, %zmm4
+; ZNVER-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; ZNVER-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
+; ZNVER-NEXT:    vpmovsxbd {{.*#+}} zmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; ZNVER-NEXT:    vpmovsxwd %ymm0, %zmm0
+; ZNVER-NEXT:    vpmovsxwd %ymm1, %zmm1
+; ZNVER-NEXT:    vpmulld %zmm4, %zmm3, %zmm3
+; ZNVER-NEXT:    vpmovsxbd {{.*#+}} zmm4 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
+; ZNVER-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; ZNVER-NEXT:    vpermi2d %zmm0, %zmm3, %zmm5
+; ZNVER-NEXT:    vpermi2d %zmm0, %zmm3, %zmm4
+; ZNVER-NEXT:    vpaddd %zmm2, %zmm5, %zmm0
+; ZNVER-NEXT:    vpaddd %zmm4, %zmm0, %zmm0
+; ZNVER-NEXT:    retq
+;
+; AVX512-VNNI-LABEL: vpdpwssd_v16i32_accumulate:
+; AVX512-VNNI:       # %bb.0:
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm3
+; AVX512-VNNI-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm4
+; AVX512-VNNI-NEXT:    vpmulld %zmm4, %zmm3, %zmm3
+; AVX512-VNNI-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm1
+; AVX512-VNNI-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; AVX512-VNNI-NEXT:    vpmovsxbd {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; AVX512-VNNI-NEXT:    vpermi2d %zmm0, %zmm3, %zmm1
+; AVX512-VNNI-NEXT:    vpmovsxbd {{.*#+}} zmm4 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
+; AVX512-VNNI-NEXT:    vpermi2d %zmm0, %zmm3, %zmm4
+; AVX512-VNNI-NEXT:    vpaddd %zmm2, %zmm1, %zmm0
+; AVX512-VNNI-NEXT:    vpaddd %zmm4, %zmm0, %zmm0
+; AVX512-VNNI-NEXT:    retq
+;
+; AVX512VL-VNNI-LABEL: vpdpwssd_v16i32_accumulate:
+; AVX512VL-VNNI:       # %bb.0:
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm3
+; AVX512VL-VNNI-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm4
+; AVX512VL-VNNI-NEXT:    vpmulld %zmm4, %zmm3, %zmm3
+; AVX512VL-VNNI-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm1
+; AVX512VL-VNNI-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; AVX512VL-VNNI-NEXT:    vpmovsxbd {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; AVX512VL-VNNI-NEXT:    vpermi2d %zmm0, %zmm3, %zmm1
+; AVX512VL-VNNI-NEXT:    vpmovsxbd {{.*#+}} zmm4 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
+; AVX512VL-VNNI-NEXT:    vpermi2d %zmm0, %zmm3, %zmm4
+; AVX512VL-VNNI-NEXT:    vpaddd %zmm2, %zmm1, %zmm0
+; AVX512VL-VNNI-NEXT:    vpaddd %zmm4, %zmm0, %zmm0
+; AVX512VL-VNNI-NEXT:    retq
+  %x0 = sext <32 x i16> %a0 to <32 x i32>
+  %x1 = sext <32 x i16> %a1 to <32 x i32>
+  %m = mul nsw <32 x i32> %x0, %x1
+  %lo = shufflevector <32 x i32> %m, <32 x i32> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %hi = shufflevector <32 x i32> %m, <32 x i32> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %r0 = add <16 x i32> %lo, %a2
+  %r1 = add <16 x i32> %r0, %hi
+  ret <16 x i32> %r1
+}
+
+define <8 x i32> @vpdpwssd_v8i32_accumulate(<16 x i16> %a0, <16 x i16> %a1, <8 x i32> %a2) {
+; ZNVER-LABEL: vpdpwssd_v8i32_accumulate:
+; ZNVER:       # %bb.0:
+; ZNVER-NEXT:    vpmovsxwd %ymm0, %zmm0
+; ZNVER-NEXT:    vpmovsxwd %ymm1, %zmm1
+; ZNVER-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; ZNVER-NEXT:    vpmovqd %zmm0, %ymm1
+; ZNVER-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; ZNVER-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
+; ZNVER-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; ZNVER-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
+; ZNVER-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; ZNVER-NEXT:    retq
+;
+; AVX512-VNNI-LABEL: vpdpwssd_v8i32_accumulate:
+; AVX512-VNNI:       # %bb.0:
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm1
+; AVX512-VNNI-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; AVX512-VNNI-NEXT:    vpmovqd %zmm0, %ymm1
+; AVX512-VNNI-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512-VNNI-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
+; AVX512-VNNI-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512-VNNI-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; AVX512-VNNI-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-VNNI-NEXT:    retq
+;
+; AVX512VL-VNNI-LABEL: vpdpwssd_v8i32_accumulate:
+; AVX512VL-VNNI:       # %bb.0:
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %ymm1, %zmm1
+; AVX512VL-VNNI-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
+; AVX512VL-VNNI-NEXT:    vpmovqd %zmm0, %ymm1
+; AVX512VL-VNNI-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-VNNI-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
+; AVX512VL-VNNI-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512VL-VNNI-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; AVX512VL-VNNI-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512VL-VNNI-NEXT:    retq
+  %x0 = sext <16 x i16> %a0 to <16 x i32>
+  %x1 = sext <16 x i16> %a1 to <16 x i32>
+  %m = mul nsw <16 x i32> %x0, %x1
+  %lo = shufflevector <16 x i32> %m, <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %hi = shufflevector <16 x i32> %m, <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %r0 = add <8 x i32> %hi, %a2
+  %r1 = add <8 x i32> %lo, %r0
+  ret <8 x i32> %r1
+}
+
+define <4 x i32> @vpdpwssd_v4i32_accumulate(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+; ZNVER-LABEL: vpdpwssd_v4i32_accumulate:
+; ZNVER:       # %bb.0:
+; ZNVER-NEXT:    vpmovsxwd %xmm0, %ymm0
+; ZNVER-NEXT:    vpmovsxwd %xmm1, %ymm1
+; ZNVER-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; ZNVER-NEXT:    vpmovqd %ymm0, %xmm1
+; ZNVER-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; ZNVER-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm3[1,3]
+; ZNVER-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; ZNVER-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; ZNVER-NEXT:    vzeroupper
+; ZNVER-NEXT:    retq
+;
+; AVX512-VNNI-LABEL: vpdpwssd_v4i32_accumulate:
+; AVX512-VNNI:       # %bb.0:
+; AVX512-VNNI-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512-VNNI-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX512-VNNI-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; AVX512-VNNI-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-VNNI-NEXT:    vshufps {{.*#+}} xmm3 = xmm0[0,2],xmm1[0,2]
+; AVX512-VNNI-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; AVX512-VNNI-NEXT:    vpaddd %xmm2, %xmm3, %xmm1
+; AVX512-VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-VNNI-NEXT:    vzeroupper
+; AVX512-VNNI-NEXT:    retq
+;
+; AVX512VL-VNNI-LABEL: vpdpwssd_v4i32_accumulate:
+; AVX512VL-VNNI:       # %bb.0:
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512VL-VNNI-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX512VL-VNNI-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; AVX512VL-VNNI-NEXT:    vpmovqd %ymm0, %xmm1
+; AVX512VL-VNNI-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512VL-VNNI-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm3[1,3]
+; AVX512VL-VNNI-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; AVX512VL-VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VL-VNNI-NEXT:    vzeroupper
+; AVX512VL-VNNI-NEXT:    retq
+  %x0 = sext <8 x i16> %a0 to <8 x i32>
+  %x1 = sext <8 x i16> %a1 to <8 x i32>
+  %m = mul nsw <8 x i32> %x0, %x1
+  %lo = shufflevector <8 x i32> %m, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %hi = shufflevector <8 x i32> %m, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %r0 = add <4 x i32> %lo, %a2
+  %r1 = add <4 x i32> %hi, %r0
+  ret <4 x i32> %r1
+}