; llvm-for-llvmta/test/CodeGen/X86/zext-logicop-shift-load.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
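; The tests below exercise the DAG combine that pushes a zext through a
; shift + and/or/xor of a narrow load, so the i8 load can be selected as a
; zero-extending load (movzbl) and the shift/logic op run on a full-width
; register.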
define i64 @test1(i8* %data) {
; X86-LABEL: test1:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl (%eax), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: andl $60, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: shll $2, %eax
; X64-NEXT: andl $60, %eax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = shl i8 %bf.load, 2
%0 = and i8 %bf.clear, 60
%mul = zext i8 %0 to i64
ret i64 %mul
}
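; Same shl+and fold; the zero-extended value is then used as a GEP index,
; and the scaled index is folded into the lea addressing mode.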
define i8* @test2(i8* %data) {
; X86-LABEL: test2:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl (%eax), %ecx
; X86-NEXT: andl $15, %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: andl $15, %eax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = shl i8 %bf.load, 2
%0 = and i8 %bf.clear, 60
%mul = zext i8 %0 to i64
%add.ptr = getelementptr inbounds i8, i8* %data, i64 %mul
ret i8* %add.ptr
}

; If the shift op is SHL, the logic op can only be AND: the widened SHL keeps
; bits that the narrow i8 SHL would have shifted out, and only an AND mask
; discards them again. With XOR (or OR) the fold would change the result, so
; the i8 ops are kept and the load is not combined with the extend.
define i64 @test3(i8* %data) {
; X86-LABEL: test3:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movb (%eax), %al
; X86-NEXT: shlb $2, %al
; X86-NEXT: xorb $60, %al
; X86-NEXT: movzbl %al, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
; X64-NEXT: movb (%rdi), %al
; X64-NEXT: shlb $2, %al
; X64-NEXT: xorb $60, %al
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = shl i8 %bf.load, 2
%0 = xor i8 %bf.clear, 60
%mul = zext i8 %0 to i64
ret i64 %mul
}
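; If the shift op is LSHR, the widened shift produces the same value as the
; narrow one, so AND, XOR and OR can all be folded (test4-test6).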
define i64 @test4(i8* %data) {
; X86-LABEL: test4:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl (%eax), %eax
; X86-NEXT: shrl $2, %eax
; X86-NEXT: andl $-4, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test4:
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: shrq $2, %rax
; X64-NEXT: andl $60, %eax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = lshr i8 %bf.load, 2
%0 = and i8 %bf.clear, 60
%1 = zext i8 %0 to i64
ret i64 %1
}

define i64 @test5(i8* %data) {
; X86-LABEL: test5:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl (%eax), %eax
; X86-NEXT: shrl $2, %eax
; X86-NEXT: xorl $60, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test5:
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: shrq $2, %rax
; X64-NEXT: xorq $60, %rax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = lshr i8 %bf.load, 2
%0 = xor i8 %bf.clear, 60
%1 = zext i8 %0 to i64
ret i64 %1
}

define i64 @test6(i8* %data) {
; X86-LABEL: test6:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl (%eax), %eax
; X86-NEXT: shrl $2, %eax
; X86-NEXT: orl $60, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test6:
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: shrq $2, %rax
; X64-NEXT: orq $60, %rax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%bf.clear = lshr i8 %bf.load, 2
%0 = or i8 %bf.clear, 60
%1 = zext i8 %0 to i64
ret i64 %1
}

; Load is folded with sext.
define i64 @test8(i8* %data) {
; X86-LABEL: test8:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movsbl (%eax), %eax
; X86-NEXT: movzwl %ax, %eax
; X86-NEXT: shrl $2, %eax
; X86-NEXT: orl $60, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: test8:
; X64: # %bb.0: # %entry
; X64-NEXT: movsbl (%rdi), %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: shrl $2, %eax
; X64-NEXT: orl $60, %eax
; X64-NEXT: retq
entry:
%bf.load = load i8, i8* %data, align 4
%ext = sext i8 %bf.load to i16
%bf.clear = lshr i16 %ext, 2
%0 = or i16 %bf.clear, 60
%1 = zext i16 %0 to i64
ret i64 %1
}