# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
--- |
  ; IR companion functions for the MIR bodies below. The instruction-select
  ; pass only needs the signatures; the fadd is what the G_FADD lowers from.

  define float @test_fadd_float(float %arg1, float %arg2) {
    %ret = fadd float %arg1, %arg2
    ret float %ret
  }

  define double @test_fadd_double(double %arg1, double %arg2) {
    %ret = fadd double %arg1, %arg2
    ret double %ret
  }

...
---
# Select G_FADD (s32): ADDSSrr on SSE, VADDSSrr on AVX, VADDSSZrr on AVX512F/VL.
name: test_fadd_float
alignment: 16
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fadd_float
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; SSE: %4:fr32 = nofpexcept ADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fadd_float
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; AVX: %4:fr32 = nofpexcept VADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fadd_float
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512F: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fadd_float
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512VL: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s32) = G_TRUNC %3(s128)
    %4:vecr(s32) = G_FADD %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s32)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...
---
# Select G_FADD (s64): ADDSDrr on SSE, VADDSDrr on AVX, VADDSDZrr on AVX512F/VL.
name: test_fadd_double
alignment: 16
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fadd_double
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; SSE: %4:fr64 = nofpexcept ADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fadd_double
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; AVX: %4:fr64 = nofpexcept VADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fadd_double
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512F: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fadd_double
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512VL: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s64) = G_TRUNC %3(s128)
    %4:vecr(s64) = G_FADD %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s64)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...