; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

; Here we add two unsigned values and check that the addition did not underflow
; AND that the result is non-zero. This can be simplified just to a comparison
; between the base and the negated offset.
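;
; The recurring pattern under test, shown in isolation below as an
; illustrative, commented-out sketch (it is not part of the input that
; FileCheck verifies):
;
;   %adjusted     = add i8 %base, %offset
;   %not_null     = icmp ne i8 %adjusted, 0       ; %base + %offset != 0
;   %no_underflow = icmp ult i8 %adjusted, %base  ; true iff the i8 add wrapped
;   %r            = or i1 %not_null, %no_underflow
;
; Worked example: for %base = -96 (0xa0, i.e. 160 as unsigned) and
; %offset = 96 (0x60), %adjusted wraps to 0, so %not_null is false, but
; 0 u< 160, so %no_underflow (and therefore %r) is true.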
declare void @use8(i8)

declare void @use1(i1)
declare void @llvm.assume(i1)

; If we are checking that the result is not null or that no underflow happened,
; the check is tautological (always true).
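; Informally, and for exposition only: the assume below guarantees that
; %base s< 0, so %base has its sign bit set and in particular %base != 0.
; If %adjusted is 0, then "icmp ult i8 %adjusted, %base" is "0 u< %base",
; which is true; otherwise %not_null is already true. Either way the 'or'
; is true, so in principle the whole expression could fold to 'i1 true';
; the autogenerated CHECK lines record what -instsimplify actually produces.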
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; If we are checking that the result is null and that underflow happened,
; the check is tautological (always false).
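; Again informally, and for exposition only: with %base s< 0 we know that
; %base != 0. If %adjusted is 0, then "icmp uge i8 %adjusted, %base" is
; "0 u>= %base", which would require %base == 0; so the two operands of the
; 'and' can never be true at the same time, and in principle the whole
; expression could fold to 'i1 false'.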
define i1 @t3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %adjusted
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; We only need to know that one of the 'add' operands is non-zero; it does not
; have to be the operand that is used in the comparison.
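; In @t5 the assume covers %offset rather than %base, but the same informal
; argument applies: %offset s< 0 implies %offset != 0, and if %adjusted is 0
; then %base is equal to (0 - %offset), which is therefore also non-zero, so
; "icmp ult i8 %adjusted, %base" (i.e. "0 u< %base") still holds. This note is
; illustrative only; the CHECK lines record the pass's actual output.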
define i1 @t5(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}