; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=XOPAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=AVX512,AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512DQVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512BWVL
;
; 32-bit runs to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86-AVX2

;
; Variable Shifts
;

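; v4i64 has no native variable arithmetic shift before AVX512 (vpsravq), so the
; expected AVX1/AVX2 lowerings below synthesize it from logical shifts: with
; m = lshr(2^63, n), ashr(x, n) == (lshr(x, n) ^ m) - m. The vpxor/vpsubq pair
; re-extends the sign bit that vpsrlq/vpsrlvq cleared
; (9223372036854775808 == 2^63).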
define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: var_shift_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpsrlq %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpsrlq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
; AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshaq %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: var_shift_v4i64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; X86-AVX1-NEXT: # xmm3 = mem[0,0]
; X86-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; X86-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm6
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; X86-AVX1-NEXT: vpsrlq %xmm2, %xmm6, %xmm2
; X86-AVX1-NEXT: vpsrlq %xmm5, %xmm6, %xmm5
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; X86-AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm4
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
; X86-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
; X86-AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; X86-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: var_shift_v4i64:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
; X86-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <4 x i64> %a, %b
  ret <4 x i64> %shift
}

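; AVX1 has no per-element variable i32 shift (vpsravd is AVX2), so each 128-bit
; half is shifted four times with vpsrad, feeding a different scalar count each
; time (isolated via vpsrldq/vpsrlq/vpunpckhdq/vpmovzxdq), and the per-lane
; results are recombined with vpblendw.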
define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_shift_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrad %xmm4, %xmm2, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
; AVX1-NEXT: vpsrad %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; AVX1-NEXT: vpsrad %xmm6, %xmm2, %xmm6
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpsrad %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshad %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: var_shift_v8i32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; X86-AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-AVX1-NEXT: vpsrad %xmm4, %xmm2, %xmm4
; X86-AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
; X86-AVX1-NEXT: vpsrad %xmm5, %xmm2, %xmm5
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; X86-AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; X86-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; X86-AVX1-NEXT: vpsrad %xmm6, %xmm2, %xmm6
; X86-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; X86-AVX1-NEXT: vpsrad %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; X86-AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; X86-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; X86-AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; X86-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; X86-AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
; X86-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X86-AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: var_shift_v8i32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <8 x i32> %a, %b
  ret <8 x i32> %shift
}

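; For v16i16 the AVX1 lowering uses a shift ladder: vpsllw $12/$4 + vpor move
; the 4-bit shift amount into the top bits of each word, then vpblendvb
; conditionally applies shifts of 8, 4, 2 and 1, with vpaddw doubling the mask
; each round to expose the next amount bit in the sign position.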
define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: var_shift_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
; AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshaw %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshaw %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v16i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v16i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X86-AVX1-LABEL: var_shift_v16i16:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; X86-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; X86-AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; X86-AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: var_shift_v16i16:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X86-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
; X86-AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
; X86-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
; X86-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
; X86-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
; X86-AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <16 x i16> %a, %b
  ret <16 x i16> %shift
}

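; v32i8 is handled at word granularity: interleaving each byte with itself
; places it in the high half of a 16-bit lane, so the vpsraw 4/2/1 blend
; ladder (keyed by the vpsllw $5 adjusted amounts) shifts with the correct
; sign, and vpsrlw $8 + vpackuswb gather the high bytes back into bytes.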
define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm4
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm4
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshab %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshab %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v32i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v32i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQVL-NEXT: vpsraw $2, %ymm3, %ymm4
; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQVL-NEXT: vpsraw $1, %ymm3, %ymm4
; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX512DQVL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQVL-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsraw $2, %ymm0, %ymm3
; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsraw $1, %ymm0, %ymm3
; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v32i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X86-AVX1-LABEL: var_shift_v32i8:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; X86-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; X86-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm4
; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm4
; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: var_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; X86-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; X86-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; X86-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; X86-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <32 x i8> %a, %b
  ret <32 x i8> %shift
}

;
; Uniform Variable Shifts
;

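; With a splatted shift amount the lowerings switch to the single-count shift
; forms (count taken from the low 64 bits of an xmm). For v4i64 the same
; xor/sub sign-fix as above is still required until AVX512 provides vpsraq.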
define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatvar_shift_v4i64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm3
; X86-AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
; X86-AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm3
; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatvar_shift_v4i64:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; X86-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
  %shift = ashr <4 x i64> %a, %splat
  ret <4 x i64> %shift
}

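; vpsrad reads its count from the low 64 bits of an xmm, so the splatted i32
; amount is zero-extended into place with vpmovzxdq (vpmovzxwq plays the same
; role for the i16 case below).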
define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatvar_shift_v8i32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatvar_shift_v8i32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X86-AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i32> %a, %splat
  ret <8 x i32> %shift
}

define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatvar_shift_v16i16:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatvar_shift_v16i16:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X86-AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i16> %a, %splat
  ret <16 x i16> %shift
}

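; Splatted v32i8 shifts are done at word granularity: vpsrlw shifts logically,
; a mask built by shifting all-ones (broadcast of its byte 1) clears the bits
; that crossed in from the neighboring byte, and the sign is restored by
; xor/sub against 0x80 bytes shifted by the same amount (32896 == 0x8080).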
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
; AVX1-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; AVX512DQ-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512DQVL-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQVL-NEXT: vpternlogq $108, %ymm0, %ymm2, %ymm1
; AVX512DQVL-NEXT: vpsubb %ymm2, %ymm1, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BWVL-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X86-AVX1-LABEL: splatvar_shift_v32i8:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; X86-AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; X86-AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
; X86-AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; X86-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
; X86-AVX1-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
; X86-AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatvar_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; X86-AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; X86-AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; X86-AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; X86-AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; X86-AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i8> %a, %splat
  ret <32 x i8> %shift
}

;
; Constant Shifts
;

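; With constant shift amounts the logical shifts fold to immediates (or a
; constant vector operand for vpsrlvq) and the sign-fix masks m = lshr(2^63, n)
; become constants: for shifts <1,7,31,62> these are [2^62, 2^56, 2^32, 2].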
define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: constant_shift_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4294967296,2]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4611686018427387904,72057594037927936]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: constant_shift_v4i64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
; X86-AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,0]
; X86-AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm2
; X86-AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1073741824,0,16777216]
; X86-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v4i64:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm2
; X86-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
  ret <4 x i64> %shift
}

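; Constant v8i32 shifts on AVX1 are built from immediate vpsrad forms, one per
; distinct amount, stitched together with vpblendw; AVX2 and AVX512 fold the
; amounts into a single memory operand for vpsravd.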
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_shift_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
; AVX1-NEXT: vpsrad $4, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
; AVX1-NEXT: vpsrad $9, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $8, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: constant_shift_v8i32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; X86-AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; X86-AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
; X86-AVX1-NEXT: vpsrad $4, %xmm0, %xmm3
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
; X86-AVX1-NEXT: vpsrad $9, %xmm0, %xmm3
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; X86-AVX1-NEXT: vpsrad $8, %xmm0, %xmm0
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v8i32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
  ret <8 x i32> %shift
}

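; Constant v16i16 shifts use the multiply-high trick: vpmulhw by 2^(16-k)
; yields an arithmetic shift by k. Amounts 0 and 1 have no in-range signed i16
; multiplier, so lane 0 is blended back from the source and lane 1 is computed
; with vpsraw $1.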
define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_shift_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX2-NEXT: vpsraw $1, %xmm0, %xmm0
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v16i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQVL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v16i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X86-AVX1-LABEL: constant_shift_v16i16:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v16i16:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpmulhw {{\.LCPI.*}}, %ymm0, %ymm1
; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX2-NEXT: vpsraw $1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5,6,7]
; X86-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; X86-AVX2-NEXT: retl
  %shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
  ret <16 x i16> %shift
}

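; x86 has no per-element i8 shifts, so the bytes are sign-extended into i16
; lanes (vpunpck{h,l}bw + vpsraw $8), multiplied by per-lane powers of two
; keeping the high byte (vpmullw + vpsrlw $8), and repacked with vpackuswb.
; XOP can use vpshab directly with negated amounts; AVX512BW widens to i16
; and uses vpsravw.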
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2,4,8,16,32,64,128,256]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpsraw $8, %ymm1, %ymm1
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,254,253,252,251,250,249,249,250,251,252,253,254,255,0]
; XOPAVX1-NEXT: vpshab %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshab %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,254,253,252,251,250,249,249,250,251,252,253,254,255,0]
; XOPAVX2-NEXT: vpshab %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpshab %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v32i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT: vpsraw $8, %ymm1, %ymm1
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQVL-NEXT: vpsraw $8, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQVL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQVL-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X86-AVX1-LABEL: constant_shift_v32i8:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2,4,8,16,32,64,128,256]
; X86-AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,128,64,32,16,8,4,2]
; X86-AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; X86-AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; X86-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; X86-AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; X86-AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; X86-AVX2-NEXT: vpsraw $8, %ymm1, %ymm1
; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm1, %ymm1
; X86-AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; X86-AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}

;
; Uniform Constant Shifts
;

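; Pre-AVX512 there is no 64-bit arithmetic shift (vpsraq), so the splatted
; i64 shift below blends vpsrad on the sign-carrying upper dwords with
; vpsrlq on the full qwords, which is valid because the amount is < 32.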
define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $7, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $7, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18446744073709551609,18446744073709551609]
; XOPAVX1-NEXT: vpshaq %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatconstant_shift_v4i64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpsrad $7, %xmm1, %xmm2
; X86-AVX1-NEXT: vpsrlq $7, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X86-AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
; X86-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatconstant_shift_v4i64:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; X86-AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; X86-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; X86-AVX2-NEXT: retl
  %shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
  ret <4 x i64> %shift
}

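; Splatted i32 and i16 amounts map directly to the vpsrad/vpsraw immediate
; forms; AVX1 just splits the 256-bit shift into two 128-bit halves.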
define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatconstant_shift_v8i32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-AVX1-NEXT: vpsrad $5, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatconstant_shift_v8i32:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <8 x i32> %shift
}

define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatconstant_shift_v16i16:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatconstant_shift_v16i16:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <16 x i16> %shift
}

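; With no i8 shifts, the splatted byte shift is done as a word shift: vpsrlw
; $3, a mask to drop the bits pulled in from the neighbouring byte, then
; sign extension of the remaining 5-bit value via xor/sub with 16 (the new
; sign bit). AVX512VL folds the and+xor pair into a single vpternlogq.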
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253]
; XOPAVX1-NEXT: vpshab %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshab %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v32i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpternlogq $108, {{.*}}(%rip), %ymm1, %ymm0
; AVX512VL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X86-AVX1-LABEL: splatconstant_shift_v32i8:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; X86-AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: splatconstant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
  %shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <32 x i8> %shift
}