; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL
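
; The expansions checked below exist because x86's min instructions do not
; give the NaN behaviour llvm.vector.reduce.fmin requires without fast-math
; flags: MINSS/MINSD/MINPS/MINPD simply return their source operand when
; either input is a NaN. Each scalar step therefore pairs the min with a
; cmpunord* self-compare of the accumulator and a select (an and/andn/or
; triple on SSE2, blendv on SSE4.1/AVX, a mask-register move on AVX512)
; that substitutes the other element when the accumulator is NaN. A sketch
; of one AVX512 step, with illustrative register names only:
;
;   vminss      %acc, %elt, %tmp        ; tmp = min, NaN-unsafe on its own
;   vcmpunordss %acc, %acc, %k1         ; k1 = (acc is NaN)
;   vmovss      %elt, %tmp, %tmp {%k1}  ; if so, keep the other element
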
;
; vXf32
;

define float @test_v1f32(<1 x float> %a0) {
; ALL-LABEL: test_v1f32:
; ALL: # %bb.0:
; ALL-NEXT: retq
  %1 = call float @llvm.vector.reduce.fmin.v1f32(<1 x float> %a0)
  ret float %1
}

define float @test_v2f32(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: cmpunordss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andps %xmm2, %xmm3
; SSE2-NEXT: minss %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: cmpunordss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: andps %xmm2, %xmm3
; SSE41-NEXT: minss %xmm0, %xmm2
; SSE41-NEXT: andnps %xmm2, %xmm1
; SSE41-NEXT: orps %xmm3, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vminss %xmm0, %xmm2, %xmm1
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
  %1 = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %a0)
  ret float %1
}

define float @test_v4f32(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: cmpunordss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: andps %xmm3, %xmm4
; SSE2-NEXT: minss %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE2-NEXT: andnps %xmm3, %xmm1
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: movaps %xmm2, %xmm3
; SSE2-NEXT: minss %xmm1, %xmm3
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: andnps %xmm3, %xmm4
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: minss %xmm1, %xmm2
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andnps %xmm2, %xmm3
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: cmpunordss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: andps %xmm3, %xmm4
; SSE41-NEXT: minss %xmm0, %xmm3
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE41-NEXT: andnps %xmm3, %xmm1
; SSE41-NEXT: orps %xmm4, %xmm1
; SSE41-NEXT: movaps %xmm2, %xmm3
; SSE41-NEXT: minss %xmm1, %xmm3
; SSE41-NEXT: cmpunordss %xmm1, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: andnps %xmm3, %xmm4
; SSE41-NEXT: andps %xmm2, %xmm1
; SSE41-NEXT: orps %xmm4, %xmm1
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: minss %xmm1, %xmm2
; SSE41-NEXT: cmpunordss %xmm1, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: andnps %xmm2, %xmm3
; SSE41-NEXT: andps %xmm0, %xmm1
; SSE41-NEXT: orps %xmm3, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vminss %xmm0, %xmm3, %xmm4
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm3, %xmm4, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
; AVX-NEXT: vminss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vblendvps %xmm3, %xmm2, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX-NEXT: vminss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX512-NEXT: vminss %xmm0, %xmm3, %xmm4
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vminss %xmm4, %xmm2, %xmm0
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vminss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
  %1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a0)
  ret float %1
}

define float @test_v8f32(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: minps %xmm0, %xmm2
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: cmpunordss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andps %xmm2, %xmm3
; SSE2-NEXT: minss %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: minss %xmm1, %xmm3
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: andnps %xmm3, %xmm4
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: minss %xmm1, %xmm2
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andnps %xmm2, %xmm3
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: minps %xmm0, %xmm2
; SSE41-NEXT: cmpunordps %xmm0, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: cmpunordss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: andps %xmm1, %xmm3
; SSE41-NEXT: minss %xmm2, %xmm1
; SSE41-NEXT: andnps %xmm1, %xmm0
; SSE41-NEXT: orps %xmm3, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: minss %xmm0, %xmm3
; SSE41-NEXT: cmpunordss %xmm0, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm4
; SSE41-NEXT: andnps %xmm3, %xmm4
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: orps %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: minss %xmm0, %xmm1
; SSE41-NEXT: cmpunordss %xmm0, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: andnps %xmm1, %xmm3
; SSE41-NEXT: andps %xmm2, %xmm0
; SSE41-NEXT: orps %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm8 = xmm1[3,3,3,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX-NEXT: vminss %xmm0, %xmm7, %xmm2
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm7, %xmm2, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX-NEXT: vminss %xmm0, %xmm6, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm6, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX-NEXT: vminss %xmm0, %xmm5, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm5, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX-NEXT: vminss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX-NEXT: vminss %xmm0, %xmm4, %xmm0
; AVX-NEXT: vblendvps %xmm1, %xmm4, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX-NEXT: vminss %xmm0, %xmm3, %xmm0
; AVX-NEXT: vblendvps %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX-NEXT: vminss %xmm0, %xmm8, %xmm0
; AVX-NEXT: vblendvps %xmm1, %xmm8, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v8f32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm8 = xmm3[3,3,3,3]
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vminss %xmm0, %xmm7, %xmm1
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vmovss %xmm7, %xmm1, %xmm1 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512BW-NEXT: vminss %xmm1, %xmm6, %xmm0
; AVX512BW-NEXT: vmovss %xmm6, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminss %xmm0, %xmm5, %xmm0
; AVX512BW-NEXT: vmovss %xmm5, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminss %xmm0, %xmm3, %xmm0
; AVX512BW-NEXT: vmovss %xmm3, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminss %xmm0, %xmm4, %xmm0
; AVX512BW-NEXT: vmovss %xmm4, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminss %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminss %xmm0, %xmm8, %xmm0
; AVX512BW-NEXT: vmovss %xmm8, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v8f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm8 = xmm1[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vminss %xmm0, %xmm7, %xmm2
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vmovss %xmm7, %xmm2, %xmm2 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512VL-NEXT: vminss %xmm2, %xmm6, %xmm0
; AVX512VL-NEXT: vmovss %xmm6, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm5, %xmm0
; AVX512VL-NEXT: vmovss %xmm5, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm4, %xmm0
; AVX512VL-NEXT: vmovss %xmm4, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm3, %xmm0
; AVX512VL-NEXT: vmovss %xmm3, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm8, %xmm0
; AVX512VL-NEXT: vmovss %xmm8, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
  %1 = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a0)
  ret float %1
}

define float @test_v16f32(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm2, %xmm4
; SSE2-NEXT: minps %xmm0, %xmm4
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm4, %xmm0
; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm2
; SSE2-NEXT: minps %xmm1, %xmm2
; SSE2-NEXT: cmpunordps %xmm1, %xmm1
; SSE2-NEXT: andps %xmm1, %xmm3
; SSE2-NEXT: andnps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: minps %xmm0, %xmm2
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: cmpunordss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andps %xmm2, %xmm3
; SSE2-NEXT: minss %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: minss %xmm1, %xmm3
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: andnps %xmm3, %xmm4
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: minss %xmm1, %xmm2
; SSE2-NEXT: cmpunordss %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: andnps %xmm2, %xmm3
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm2, %xmm4
; SSE41-NEXT: minps %xmm0, %xmm4
; SSE41-NEXT: cmpunordps %xmm0, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm4
; SSE41-NEXT: movaps %xmm3, %xmm2
; SSE41-NEXT: minps %xmm1, %xmm2
; SSE41-NEXT: cmpunordps %xmm1, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: minps %xmm4, %xmm1
; SSE41-NEXT: cmpunordps %xmm4, %xmm4
; SSE41-NEXT: movaps %xmm4, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: cmpunordss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: andps %xmm2, %xmm3
; SSE41-NEXT: minss %xmm1, %xmm2
; SSE41-NEXT: andnps %xmm2, %xmm0
; SSE41-NEXT: orps %xmm3, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: movaps %xmm2, %xmm3
; SSE41-NEXT: minss %xmm0, %xmm3
; SSE41-NEXT: cmpunordss %xmm0, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm4
; SSE41-NEXT: andnps %xmm3, %xmm4
; SSE41-NEXT: andps %xmm2, %xmm0
; SSE41-NEXT: orps %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: minss %xmm0, %xmm2
; SSE41-NEXT: cmpunordss %xmm0, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: andnps %xmm2, %xmm3
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: orps %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f32:
; AVX: # %bb.0:
; AVX-NEXT: vminps %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordps %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vminss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX-NEXT: vminss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vminss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vminss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vminss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vminss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v16f32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vminss %xmm0, %xmm2, %xmm3
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,3,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vminss %xmm3, %xmm1, %xmm2
; AVX512BW-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vminss %xmm2, %xmm0, %xmm2
; AVX512BW-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vminss %xmm2, %xmm1, %xmm2
; AVX512BW-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vminss %xmm2, %xmm1, %xmm2
; AVX512BW-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vminss %xmm2, %xmm1, %xmm0
; AVX512BW-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v16f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm8 = xmm3[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm9 = xmm3[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm10 = xmm3[1,1,3,3]
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm6
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm11 = xmm6[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm12 = xmm6[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm13 = xmm6[1,1,3,3]
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm14 = xmm2[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm15 = xmm2[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm16 = xmm2[1,1,3,3]
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vminss %xmm0, %xmm5, %xmm7
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vmovss %xmm5, %xmm7, %xmm7 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm7, %xmm7, %k1
; AVX512VL-NEXT: vminss %xmm7, %xmm4, %xmm0
; AVX512VL-NEXT: vmovss %xmm4, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm16, %xmm0
; AVX512VL-NEXT: vmovss %xmm16, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm15, %xmm0
; AVX512VL-NEXT: vmovss %xmm15, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm14, %xmm0
; AVX512VL-NEXT: vmovss %xmm14, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm6, %xmm0
; AVX512VL-NEXT: vmovss %xmm6, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm13, %xmm0
; AVX512VL-NEXT: vmovss %xmm13, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm12, %xmm0
; AVX512VL-NEXT: vmovss %xmm12, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm11, %xmm0
; AVX512VL-NEXT: vmovss %xmm11, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm3, %xmm0
; AVX512VL-NEXT: vmovss %xmm3, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm10, %xmm0
; AVX512VL-NEXT: vmovss %xmm10, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm9, %xmm0
; AVX512VL-NEXT: vmovss %xmm9, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminss %xmm0, %xmm8, %xmm0
; AVX512VL-NEXT: vmovss %xmm8, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
  %1 = call float @llvm.vector.reduce.fmin.v16f32(<16 x float> %a0)
  ret float %1
}

;
; vXf64
;

define double @test_v2f64(<2 x double> %a0) {
; SSE-LABEL: test_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: cmpunordsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm3
; SSE-NEXT: andpd %xmm2, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm2, %xmm1
; SSE-NEXT: orpd %xmm3, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm0, %xmm2, %xmm1
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovapd %xmm1, %xmm0
; AVX512-NEXT: retq
  %1 = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %a0)
  ret double %1
}

define double @test_v3f64(<3 x double> %a0) {
; SSE2-LABEL: test_v3f64:
; SSE2: # %bb.0:
; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: shufpd {{.*#+}} xmm2 = xmm2[0],mem[1]
; SSE2-NEXT: movapd %xmm2, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: orpd %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: cmpunordsd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm3
; SSE2-NEXT: andpd %xmm2, %xmm3
; SSE2-NEXT: minsd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v3f64:
; SSE41: # %bb.0:
; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: blendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: minpd %xmm0, %xmm1
; SSE41-NEXT: cmpunordpd %xmm0, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpunordsd %xmm1, %xmm0
; SSE41-NEXT: movapd %xmm0, %xmm3
; SSE41-NEXT: andpd %xmm2, %xmm3
; SSE41-NEXT: minsd %xmm1, %xmm2
; SSE41-NEXT: andnpd %xmm2, %xmm0
; SSE41-NEXT: orpd %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v3f64:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v3f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm2, %xmm2, %k1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vminsd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call double @llvm.vector.reduce.fmin.v3f64(<3 x double> %a0)
  ret double %1
}

define double @test_v4f64(<4 x double> %a0) {
; SSE2-LABEL: test_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm1
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: cmpunordsd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm3
; SSE2-NEXT: andpd %xmm2, %xmm3
; SSE2-NEXT: minsd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: minpd %xmm0, %xmm2
; SSE41-NEXT: cmpunordpd %xmm0, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: cmpunordsd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm0, %xmm3
; SSE41-NEXT: andpd %xmm1, %xmm3
; SSE41-NEXT: minsd %xmm2, %xmm1
; SSE41-NEXT: andnpd %xmm1, %xmm0
; SSE41-NEXT: orpd %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm0, %xmm3, %xmm4
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm3, %xmm4, %xmm0
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm1
; AVX-NEXT: vminsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vblendvpd %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm0, %xmm3, %xmm4
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm4, %xmm4, %k1
; AVX512-NEXT: vminsd %xmm4, %xmm1, %xmm0
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vminsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %a0)
  ret double %1
}

define double @test_v8f64(<8 x double> %a0) {
; SSE2-LABEL: test_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm2, %xmm4
; SSE2-NEXT: minpd %xmm0, %xmm4
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm4, %xmm0
; SSE2-NEXT: orpd %xmm2, %xmm0
; SSE2-NEXT: movapd %xmm3, %xmm2
; SSE2-NEXT: minpd %xmm1, %xmm2
; SSE2-NEXT: cmpunordpd %xmm1, %xmm1
; SSE2-NEXT: andpd %xmm1, %xmm3
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm1
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: cmpunordsd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm3
; SSE2-NEXT: andpd %xmm2, %xmm3
; SSE2-NEXT: minsd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movapd %xmm2, %xmm4
; SSE41-NEXT: minpd %xmm0, %xmm4
; SSE41-NEXT: cmpunordpd %xmm0, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE41-NEXT: movapd %xmm3, %xmm2
; SSE41-NEXT: minpd %xmm1, %xmm2
; SSE41-NEXT: cmpunordpd %xmm1, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: minpd %xmm4, %xmm1
; SSE41-NEXT: cmpunordpd %xmm4, %xmm4
; SSE41-NEXT: movapd %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpunordsd %xmm1, %xmm0
; SSE41-NEXT: movapd %xmm0, %xmm3
; SSE41-NEXT: andpd %xmm2, %xmm3
; SSE41-NEXT: minsd %xmm1, %xmm2
; SSE41-NEXT: andnpd %xmm2, %xmm0
; SSE41-NEXT: orpd %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f64:
; AVX: # %bb.0:
; AVX-NEXT: vminpd %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v8f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm2
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm8 = xmm2[1,0]
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
; AVX512BW-NEXT: vminsd %xmm0, %xmm7, %xmm1
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vmovsd %xmm7, %xmm1, %xmm1 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512BW-NEXT: vminsd %xmm1, %xmm5, %xmm0
; AVX512BW-NEXT: vmovsd %xmm5, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminsd %xmm0, %xmm6, %xmm0
; AVX512BW-NEXT: vmovsd %xmm6, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminsd %xmm0, %xmm3, %xmm0
; AVX512BW-NEXT: vmovsd %xmm3, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminsd %xmm0, %xmm4, %xmm0
; AVX512BW-NEXT: vmovsd %xmm4, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminsd %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vminsd %xmm0, %xmm8, %xmm0
; AVX512BW-NEXT: vmovsd %xmm8, %xmm0, %xmm0 {%k1}
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v8f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm8 = xmm1[1,0]
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
; AVX512VL-NEXT: vminsd %xmm0, %xmm7, %xmm2
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vmovsd %xmm7, %xmm2, %xmm2 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm2, %xmm2, %k1
; AVX512VL-NEXT: vminsd %xmm2, %xmm5, %xmm0
; AVX512VL-NEXT: vmovsd %xmm5, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminsd %xmm0, %xmm6, %xmm0
; AVX512VL-NEXT: vmovsd %xmm6, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminsd %xmm0, %xmm3, %xmm0
; AVX512VL-NEXT: vmovsd %xmm3, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminsd %xmm0, %xmm4, %xmm0
; AVX512VL-NEXT: vmovsd %xmm4, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vminsd %xmm0, %xmm8, %xmm0
; AVX512VL-NEXT: vmovsd %xmm8, %xmm0, %xmm0 {%k1}
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
  %1 = call double @llvm.vector.reduce.fmin.v8f64(<8 x double> %a0)
  ret double %1
}

define double @test_v16f64(<16 x double> %a0) {
; SSE2-LABEL: test_v16f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm4, %xmm8
; SSE2-NEXT: minpd %xmm0, %xmm8
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm4
; SSE2-NEXT: andnpd %xmm8, %xmm0
; SSE2-NEXT: orpd %xmm4, %xmm0
; SSE2-NEXT: movapd %xmm6, %xmm4
; SSE2-NEXT: minpd %xmm2, %xmm4
; SSE2-NEXT: cmpunordpd %xmm2, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm6
; SSE2-NEXT: andnpd %xmm4, %xmm2
; SSE2-NEXT: orpd %xmm6, %xmm2
; SSE2-NEXT: movapd %xmm2, %xmm4
; SSE2-NEXT: minpd %xmm0, %xmm4
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm4, %xmm0
; SSE2-NEXT: orpd %xmm2, %xmm0
; SSE2-NEXT: movapd %xmm5, %xmm2
; SSE2-NEXT: minpd %xmm1, %xmm2
; SSE2-NEXT: cmpunordpd %xmm1, %xmm1
; SSE2-NEXT: andpd %xmm1, %xmm5
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm5, %xmm1
; SSE2-NEXT: movapd %xmm7, %xmm2
; SSE2-NEXT: minpd %xmm3, %xmm2
; SSE2-NEXT: cmpunordpd %xmm3, %xmm3
; SSE2-NEXT: andpd %xmm3, %xmm7
; SSE2-NEXT: andnpd %xmm2, %xmm3
; SSE2-NEXT: orpd %xmm7, %xmm3
; SSE2-NEXT: movapd %xmm3, %xmm2
; SSE2-NEXT: minpd %xmm1, %xmm2
; SSE2-NEXT: cmpunordpd %xmm1, %xmm1
; SSE2-NEXT: andpd %xmm1, %xmm3
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm1
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: cmpunordsd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm3
; SSE2-NEXT: andpd %xmm2, %xmm3
; SSE2-NEXT: minsd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movapd %xmm3, %xmm8
; SSE41-NEXT: movapd %xmm4, %xmm3
; SSE41-NEXT: minpd %xmm0, %xmm3
; SSE41-NEXT: cmpunordpd %xmm0, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movapd %xmm6, %xmm4
; SSE41-NEXT: minpd %xmm2, %xmm4
; SSE41-NEXT: cmpunordpd %xmm2, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm4
; SSE41-NEXT: movapd %xmm4, %xmm2
; SSE41-NEXT: minpd %xmm3, %xmm2
; SSE41-NEXT: cmpunordpd %xmm3, %xmm3
; SSE41-NEXT: movapd %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movapd %xmm5, %xmm3
; SSE41-NEXT: minpd %xmm1, %xmm3
; SSE41-NEXT: cmpunordpd %xmm1, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm3
; SSE41-NEXT: movapd %xmm7, %xmm1
; SSE41-NEXT: minpd %xmm8, %xmm1
; SSE41-NEXT: cmpunordpd %xmm8, %xmm8
; SSE41-NEXT: movapd %xmm8, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: minpd %xmm3, %xmm4
; SSE41-NEXT: cmpunordpd %xmm3, %xmm3
; SSE41-NEXT: movapd %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
; SSE41-NEXT: movapd %xmm4, %xmm1
; SSE41-NEXT: minpd %xmm2, %xmm1
; SSE41-NEXT: cmpunordpd %xmm2, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpunordsd %xmm1, %xmm0
; SSE41-NEXT: movapd %xmm0, %xmm3
; SSE41-NEXT: andpd %xmm2, %xmm3
; SSE41-NEXT: minsd %xmm1, %xmm2
; SSE41-NEXT: andnpd %xmm2, %xmm0
; SSE41-NEXT: orpd %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f64:
; AVX: # %bb.0:
; AVX-NEXT: vminpd %ymm0, %ymm2, %ymm4
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
; AVX-NEXT: vminpd %ymm1, %ymm3, %ymm2
; AVX-NEXT: vcmpunordpd %ymm1, %ymm1, %ymm1
; AVX-NEXT: vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
; AVX-NEXT: vminpd %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vminpd %zmm0, %zmm1, %zmm2
; AVX512-NEXT: vcmpunordpd %zmm0, %zmm0, %k1
; AVX512-NEXT: vmovapd %zmm1, %zmm2 {%k1}
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm2[1,0]
; AVX512-NEXT: vminsd %xmm2, %xmm0, %xmm1
; AVX512-NEXT: vcmpunordsd %xmm2, %xmm2, %k1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm0
; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm0
; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm0
; AVX512-NEXT: vminsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vminsd %xmm1, %xmm2, %xmm0
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call double @llvm.vector.reduce.fmin.v16f64(<16 x double> %a0)
  ret double %1
}

declare float @llvm.vector.reduce.fmin.v1f32(<1 x float>)
declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)
declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fmin.v16f32(<16 x float>)

declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>)
declare double @llvm.vector.reduce.fmin.v3f64(<3 x double>)
declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>)
declare double @llvm.vector.reduce.fmin.v8f64(<8 x double>)
declare double @llvm.vector.reduce.fmin.v16f64(<16 x double>)