; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX512,AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512DQVL

;
; vXi64
;
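
; A note on the lowering below: without AVX512DQ's vpmullq there is no native
; vector i64 multiply, so each 64-bit product is assembled from 32-bit
; pmuludq halves. A minimal scalar sketch of the same decomposition in C
; (hypothetical helper, not part of the checked output):
;
;   uint64_t mul64(uint64_t a, uint64_t b) {
;     uint64_t lolo = (uint64_t)(uint32_t)a * (uint32_t)b; // pmuludq
;     uint64_t hilo = (a >> 32) * (uint32_t)b;             // psrlq + pmuludq
;     uint64_t lohi = (uint32_t)a * (b >> 32);             // pmuludq
;     return lolo + ((hilo + lohi) << 32);                 // paddq/psllq/paddq
;   }
;
; The hi*hi term overflows past bit 63 and is dropped (mod 2^64). The
; reduction halves the vector with extracts/shuffles each step, so the
; v2/v4/v8/v16 cases need 1/2/3/4 of these multiplies.
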
define i64 @test_v2i64(<2 x i64> %a0) {
; SSE-LABEL: test_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: test_v2i64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1OR2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1OR2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1OR2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1OR2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1OR2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1OR2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1OR2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1OR2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovq %xmm0, %rax
; AVX1OR2-NEXT: retq
;
; AVX512BW-LABEL: test_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v2i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v2i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v2i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %a0)
  ret i64 %1
}

define i64 @test_v4i64(<4 x i64> %a0) {
; SSE-LABEL: test_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v4i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v4i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v4i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %a0)
  ret i64 %1
}

define i64 @test_v8i64(<8 x i64> %a0) {
; SSE-LABEL: test_v8i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm0, %xmm4
; SSE-NEXT: paddq %xmm3, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm1
; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v8i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v8i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v8i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %a0)
  ret i64 %1
}

define i64 @test_v16i64(<16 x i64> %a0) {
; SSE-LABEL: test_v16i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm2, %xmm8
; SSE-NEXT: psrlq $32, %xmm8
; SSE-NEXT: pmuludq %xmm6, %xmm8
; SSE-NEXT: movdqa %xmm6, %xmm9
; SSE-NEXT: psrlq $32, %xmm9
; SSE-NEXT: pmuludq %xmm2, %xmm9
; SSE-NEXT: paddq %xmm8, %xmm9
; SSE-NEXT: psllq $32, %xmm9
; SSE-NEXT: pmuludq %xmm6, %xmm2
; SSE-NEXT: paddq %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: psrlq $32, %xmm8
; SSE-NEXT: pmuludq %xmm4, %xmm8
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm0, %xmm6
; SSE-NEXT: paddq %xmm8, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm4, %xmm0
; SSE-NEXT: paddq %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm7, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm3, %xmm6
; SSE-NEXT: paddq %xmm4, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm7, %xmm3
; SSE-NEXT: paddq %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm6
; SSE-NEXT: paddq %xmm4, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm5, %xmm1
; SSE-NEXT: paddq %xmm6, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm0, %xmm4
; SSE-NEXT: paddq %xmm3, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm1, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsllq $32, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm6
; AVX1-NEXT: vpmuludq %xmm3, %xmm6, %xmm6
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm7
; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm7
; AVX1-NEXT: vpaddq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpsllq $32, %xmm6, %xmm6
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm5, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX1-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm5
; AVX2-NEXT: vpmuludq %ymm5, %ymm1, %ymm5
; AVX2-NEXT: vpaddq %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsllq $32, %ymm4, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm4
; AVX2-NEXT: vpmuludq %ymm4, %ymm0, %ymm4
; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpsllq $32, %ymm3, %ymm3
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v16i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v16i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v16i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %a0)
  ret i64 %1
}

;
; vXi32
;
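
; A note on the lowering below: pmulld (low 32 bits of a 32x32 multiply) only
; arrived with SSE4.1, so the SSE2 checks emulate it with pmuludq on the
; even/odd lanes plus shuffles, while SSE4.1/AVX shuffle and pmulld. A sketch
; of the shuffle-halving reduction for one xmm register, assuming C semantics
; (hypothetical helper, not part of the checked output):
;
;   uint32_t reduce_mul_v4(const uint32_t v[4]) {
;     uint32_t a = v[0] * v[2];   // pshufd [2,3,2,3] + pmulld, even lanes
;     uint32_t b = v[1] * v[3];   // same multiply covers the odd lanes
;     return a * b;               // pshufd [1,1,1,1] + pmulld
;   }
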
define i32 @test_v2i32(<2 x i32> %a0) {
; SSE2-LABEL: test_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i32:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
  %1 = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %a0)
  ret i32 %1
}

define i32 @test_v4i32(<4 x i32> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
  %1 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %a0)
  ret i32 %1
}

define i32 @test_v8i32(<8 x i32> %a0) {
; SSE2-LABEL: test_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %a0)
  ret i32 %1
}

define i32 @test_v16i32(<16 x i32> %a0) {
; SSE2-LABEL: test_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,2,2]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmulld %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %a0)
  ret i32 %1
}

define i32 @test_v32i32(<32 x i32> %a0) {
; SSE2-LABEL: test_v32i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm8, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm8, %xmm10
; SSE2-NEXT: pmuludq %xmm9, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm8, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm5[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm8, %xmm11
; SSE2-NEXT: pmuludq %xmm9, %xmm11
; SSE2-NEXT: pmuludq %xmm10, %xmm11
; SSE2-NEXT: pmuludq %xmm6, %xmm2
; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pmuludq %xmm7, %xmm3
; SSE2-NEXT: pmuludq %xmm5, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,2,2]
; SSE2-NEXT: pmuludq %xmm11, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm6, %xmm2
; SSE41-NEXT: pmulld %xmm7, %xmm3
; SSE41-NEXT: pmulld %xmm5, %xmm3
; SSE41-NEXT: pmulld %xmm1, %xmm3
; SSE41-NEXT: pmulld %xmm4, %xmm2
; SSE41-NEXT: pmulld %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.vector.reduce.mul.v32i32(<32 x i32> %a0)
  ret i32 %1
}

;
; vXi16
;
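
; A note on the lowering below: pmullw has existed since SSE2, so every
; target reduces by halving the vector with shuffles and multiplying; the
; last two i16 lanes share one 32-bit element, so the final step is a
; psrld $16 plus pmullw. In C terms (sketch; x is a hypothetical 32-bit
; value holding both lanes):
;
;   uint16_t last = (uint16_t)x * (uint16_t)(x >> 16); // psrld $16 + pmullw
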
define i16 @test_v2i16(<2 x i16> %a0) {
; SSE-LABEL: test_v2i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %a0)
  ret i16 %1
}

define i16 @test_v4i16(<4 x i16> %a0) {
; SSE-LABEL: test_v4i16:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i16:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %a0)
  ret i16 %1
}

define i16 @test_v8i16(<8 x i16> %a0) {
; SSE-LABEL: test_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %a0)
  ret i16 %1
}

define i16 @test_v16i16(<16 x i16> %a0) {
; SSE-LABEL: test_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %a0)
  ret i16 %1
}

define i16 @test_v32i16(<32 x i16> %a0) {
; SSE-LABEL: test_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v32i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v32i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovd %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v32i16:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v32i16(<32 x i16> %a0)
  ret i16 %1
}

define i16 @test_v64i16(<64 x i16> %a0) {
; SSE-LABEL: test_v64i16:
; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm6, %xmm2
; SSE-NEXT: pmullw %xmm7, %xmm3
; SSE-NEXT: pmullw %xmm5, %xmm3
; SSE-NEXT: pmullw %xmm1, %xmm3
; SSE-NEXT: pmullw %xmm4, %xmm2
; SSE-NEXT: pmullw %xmm3, %xmm2
; SSE-NEXT: pmullw %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v64i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v64i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v64i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v64i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovd %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v64i16:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i16 @llvm.vector.reduce.mul.v64i16(<64 x i16> %a0)
  ret i16 %1
}

;
|
|
; vXi8
|
|
;
|
|
|
|
define i8 @test_v2i8(<2 x i8> %a0) {
|
|
; SSE-LABEL: test_v2i8:
|
|
; SSE: # %bb.0:
|
|
; SSE-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE-NEXT: psrlw $8, %xmm1
|
|
; SSE-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE-NEXT: movd %xmm1, %eax
|
|
; SSE-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE-NEXT: retq
|
|
;
|
|
; AVX-LABEL: test_v2i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v4i8(<4 x i8> %a0) {
|
|
; SSE2-LABEL: test_v4i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v4i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE41-NEXT: psrld $16, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movd %xmm0, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX-LABEL: test_v4i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v8i8(<8 x i8> %a0) {
|
|
; SSE2-LABEL: test_v8i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v8i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX-LABEL: test_v8i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v16i8(<16 x i8> %a0) {
|
|
; SSE2-LABEL: test_v16i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v16i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX-LABEL: test_v16i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v32i8(<32 x i8> %a0) {
|
|
; SSE2-LABEL: test_v32i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm2, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm2
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm2, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v32i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm1
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm0
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_v32i8:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_v32i8:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: test_v32i8:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512-NEXT: vzeroupper
|
|
; AVX512-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v64i8(<64 x i8> %a0) {
|
|
; SSE2-LABEL: test_v64i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm4
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm4, %xmm3
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm4
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm4
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm4, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm2
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm2, %xmm3
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm3
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v64i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm4, %xmm3
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm4, %xmm1
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm2
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm0
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_v64i8:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm2
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_v64i8:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; AVX512BW-LABEL: test_v64i8:
|
|
; AVX512BW: # %bb.0:
|
|
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512BW-NEXT: vzeroupper
|
|
; AVX512BW-NEXT: retq
|
|
;
|
|
; AVX512BWVL-LABEL: test_v64i8:
|
|
; AVX512BWVL: # %bb.0:
|
|
; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512BWVL-NEXT: vzeroupper
|
|
; AVX512BWVL-NEXT: retq
|
|
;
|
|
; AVX512DQ-LABEL: test_v64i8:
|
|
; AVX512DQ: # %bb.0:
|
|
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512DQ-NEXT: vzeroupper
|
|
; AVX512DQ-NEXT: retq
|
|
;
|
|
; AVX512DQVL-LABEL: test_v64i8:
|
|
; AVX512DQVL: # %bb.0:
|
|
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512DQVL-NEXT: vzeroupper
|
|
; AVX512DQVL-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v64i8(<64 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
define i8 @test_v128i8(<128 x i8> %a0) {
|
|
; SSE2-LABEL: test_v128i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm7, %xmm8
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm8, %xmm7
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm8
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm7, %xmm8
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm8, %xmm3
|
|
; SSE2-NEXT: movdqa %xmm5, %xmm7
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm7, %xmm5
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm7
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm5, %xmm7
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm7
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm7, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm6
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm6, %xmm3
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm2
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm4
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm4, %xmm3
|
|
; SSE2-NEXT: pmullw %xmm2, %xmm3
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm3
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v128i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm8, %xmm7
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm7, %xmm3
|
|
; SSE41-NEXT: pmullw %xmm8, %xmm3
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm7, %xmm5
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm5, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm7, %xmm1
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm6
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm6, %xmm2
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm2
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm4
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm4, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm3, %xmm0
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_v128i8:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm6, %xmm4
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm5
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm5, %xmm7, %xmm5
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_v128i8:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpmullw %ymm3, %ymm4, %ymm3
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; AVX512BW-LABEL: test_v128i8:
|
|
; AVX512BW: # %bb.0:
|
|
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
|
|
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm2, %zmm1
|
|
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512BW-NEXT: vzeroupper
|
|
; AVX512BW-NEXT: retq
|
|
;
|
|
; AVX512BWVL-LABEL: test_v128i8:
|
|
; AVX512BWVL: # %bb.0:
|
|
; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm1, %zmm1
|
|
; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm2, %zmm1
|
|
; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512BWVL-NEXT: vzeroupper
|
|
; AVX512BWVL-NEXT: retq
|
|
;
|
|
; AVX512DQ-LABEL: test_v128i8:
|
|
; AVX512DQ: # %bb.0:
|
|
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm2
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
|
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm4, %ymm2
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm3, %ymm2
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm3, %ymm1
|
|
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
|
|
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512DQ-NEXT: vzeroupper
|
|
; AVX512DQ-NEXT: retq
|
|
;
|
|
; AVX512DQVL-LABEL: test_v128i8:
|
|
; AVX512DQVL: # %bb.0:
|
|
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
|
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm4, %ymm2
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm3, %ymm2
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm3, %ymm1
|
|
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
|
|
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512DQVL-NEXT: vzeroupper
|
|
; AVX512DQVL-NEXT: retq
|
|
%1 = call i8 @llvm.vector.reduce.mul.v128i8(<128 x i8> %a0)
|
|
ret i8 %1
|
|
}
|
|
|
|
;
|
|
; Legalization
|
|
;
|
|
|
|
define i8 @illegal_v4i8(i8 %a0, <4 x i8>* %a1) {
|
|
; SSE2-LABEL: illegal_v4i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movl %edi, %eax
|
|
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movd %xmm0, %ecx
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: mulb %cl
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: illegal_v4i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: movl %edi, %eax
|
|
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE41-NEXT: psrld $16, %xmm0
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movd %xmm0, %ecx
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: mulb %cl
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX-LABEL: illegal_v4i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: movl %edi, %eax
|
|
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
|
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %ecx
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: mulb %cl
|
|
; AVX-NEXT: retq
|
|
%ld = load <4 x i8>, <4 x i8>* %a1, align 4
|
|
%rdx = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %ld)
|
|
%mul = mul i8 %a0, %rdx
|
|
ret i8 %mul
|
|
}
|
|
|
|
define i8 @illegal_v8i8(i8 %a0, <8 x i8>* %a1) {
|
|
; SSE2-LABEL: illegal_v8i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movl %edi, %eax
|
|
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
; SSE2-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE2-NEXT: movd %xmm1, %ecx
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: mulb %cl
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: illegal_v8i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: movl %edi, %eax
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE41-NEXT: psrld $16, %xmm1
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE41-NEXT: movd %xmm1, %ecx
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: mulb %cl
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX-LABEL: illegal_v8i8:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: movl %edi, %eax
|
|
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX-NEXT: vmovd %xmm0, %ecx
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX-NEXT: mulb %cl
|
|
; AVX-NEXT: retq
|
|
%ld = load <8 x i8>, <8 x i8>* %a1, align 4
|
|
%rdx = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %ld)
|
|
%mul = mul i8 %a0, %rdx
|
|
ret i8 %mul
|
|
}
|
|
|
|
declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
|
|
declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
|
|
declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
|
|
declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
|
|
|
|
declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
|
|
declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
|
|
declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
|
|
declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)
|
|
declare i32 @llvm.vector.reduce.mul.v32i32(<32 x i32>)
|
|
|
|
declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
|
|
declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
|
|
declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
|
|
declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)
|
|
declare i16 @llvm.vector.reduce.mul.v32i16(<32 x i16>)
|
|
declare i16 @llvm.vector.reduce.mul.v64i16(<64 x i16>)
|
|
|
|
declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v64i8(<64 x i8>)
|
|
declare i8 @llvm.vector.reduce.mul.v128i8(<128 x i8>)
|