; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK-FMA
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK-AVX512VL
; RUN: llc < %s -mtriple=x86_64-pc-windows -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK-FMA-WIN

; VFMADD
; Scalar-float FMA: checks @llvm.x86.fma.vfmadd.ss lowers to vfmadd213ss
; (FMA/AVX512VL) and, on Windows with memory args, to vfmadd132ss.
define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
; Same intrinsic with the first two operands swapped (b, a, c order); checks
; the multiplicands commute into vfmadd213ss with a final register copy.
define <4 x float> @test_x86_fma_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xa9,0xca]
; CHECK-FMA-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>)
; Scalar-double FMA: checks @llvm.x86.fma.vfmadd.sd lowers to vfmadd213sd
; (FMA/AVX512VL) and to vfmadd132sd with memory args on Windows.
define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
; Scalar-double FMA with the multiplicands swapped (b, a, c order).
define <2 x double> @test_x86_fma_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
; CHECK-FMA-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>)
; Packed-float (128-bit) FMA: checks lowering to vfmadd213ps, folding the
; addend from memory on Windows.
define <4 x float> @test_x86_fma_vfmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>)
; Packed-double (128-bit) FMA: checks lowering to vfmadd213pd.
define <2 x double> @test_x86_fma_vfmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>)
; Packed-float (256-bit) FMA: checks lowering to vfmadd213ps on ymm registers.
define <8 x float> @test_x86_fma_vfmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>)
; Packed-double (256-bit) FMA: checks lowering to vfmadd213pd on ymm registers.
define <4 x double> @test_x86_fma_vfmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>)
; VFMSUB

; Scalar-float fused multiply-subtract: checks lowering to vfmsub213ss
; (vfmsub132ss with memory args on Windows).
define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
; Scalar-float fused multiply-subtract with the multiplicands swapped.
define <4 x float> @test_x86_fma_vfmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xab,0xca]
; CHECK-FMA-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>)
; Scalar-double fused multiply-subtract: checks lowering to vfmsub213sd.
define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
; Scalar-double fused multiply-subtract with the multiplicands swapped.
define <2 x double> @test_x86_fma_vfmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xab,0xca]
; CHECK-FMA-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>)
; Packed-float (128-bit) fused multiply-subtract: checks lowering to vfmsub213ps.
define <4 x float> @test_x86_fma_vfmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>)
; Packed-double (128-bit) fused multiply-subtract: checks lowering to vfmsub213pd.
define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>)
; Packed-float (256-bit) fused multiply-subtract: checks vfmsub213ps on ymm.
define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>)
; Packed-double (256-bit) fused multiply-subtract: checks vfmsub213pd on ymm.
define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>)
; VFNMADD

; Scalar-float negated fused multiply-add: checks lowering to vfnmadd213ss
; (vfnmadd132ss with memory args on Windows).
define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
; Scalar-float negated fused multiply-add with the multiplicands swapped.
define <4 x float> @test_x86_fma_vfnmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xad,0xca]
; CHECK-FMA-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float>, <4 x float>, <4 x float>)
; Scalar-double negated fused multiply-add: checks lowering to vfnmadd213sd.
define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
; Scalar-double negated fused multiply-add with the multiplicands swapped.
define <2 x double> @test_x86_fma_vfnmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xad,0xca]
; CHECK-FMA-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x double>)
; Packed-float (128-bit) negated fused multiply-add: checks vfnmadd213ps.
define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>)
; Packed-double (128-bit) negated fused multiply-add: checks vfnmadd213pd.
define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>)
; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfnmadd.ps.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfnmadd.pd.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>)

; VFNMSUB
; Checks llc lowering of the legacy scalar @llvm.x86.fma.vfnmsub.ss intrinsic;
; the Win64 run line folds the multiplicand from memory via the 132 form.
define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}

; Same intrinsic as above but with the first two operands swapped (b, a, c),
; verifying the commuted-operand selection of vfnmsub.
define <4 x float> @test_x86_fma_vfnmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_ss:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xaf,0xca]
; CHECK-FMA-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_ss:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float>, <4 x float>, <4 x float>)

; Checks llc lowering of the legacy scalar @llvm.x86.fma.vfnmsub.sd intrinsic;
; the Win64 run line folds the multiplicand from memory via the 132 form.
define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}

; Same intrinsic as above but with the first two operands swapped (b, a, c),
; verifying the commuted-operand selection of vfnmsub.
define <2 x double> @test_x86_fma_vfnmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_sd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
; CHECK-FMA-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_sd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
; CHECK-AVX512VL-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>)

; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfnmsub.ps intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>)

; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfnmsub.pd intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfnmsub.ps.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfnmsub.pd.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>)

; VFMADDSUB
; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfmaddsub.ps intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float>, <4 x float>, <4 x float>)

; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfmaddsub.pd intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double>, <2 x double>, <2 x double>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfmaddsub.ps.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfmaddsub.pd.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>)

; VFMSUBADD
; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfmsubadd.ps intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float>, <4 x float>, <4 x float>)

; Checks llc lowering of the legacy 128-bit @llvm.x86.fma.vfmsubadd.pd intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double>, <2 x double>, <2 x double>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfmsubadd.ps.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8 x float>)

; Checks llc lowering of the legacy 256-bit @llvm.x86.fma.vfmsubadd.pd.256 intrinsic
; for the FMA3, AVX512VL (EVEX-to-VEX), and Win64 memory-fold run lines.
define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd_256:
; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd_256:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd_256:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
  %res = call <4 x double> @llvm.x86.fma.vfmsubadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmsubadd.pd.256(<4 x double>, <4 x double>, <4 x double>)

; All test functions above are marked nounwind so no CFI/unwind tables are emitted.
attributes #0 = { nounwind }