; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ppc32-- | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_32
; RUN: llc < %s -mtriple=ppc32-- -mcpu=ppc64 | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_64
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s --check-prefixes=CHECK,CHECK64

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

; When first 2 operands match, it's a rotate.

define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 4, 3, 27
; CHECK-NEXT:    rlwimi 4, 3, 3, 0, 28
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK32-LABEL: rotl_i64_const_shift:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    rotlwi 5, 4, 3
; CHECK32-NEXT:    rotlwi 6, 3, 3
; CHECK32-NEXT:    rlwimi 5, 3, 3, 0, 28
; CHECK32-NEXT:    rlwimi 6, 4, 3, 0, 28
; CHECK32-NEXT:    mr 3, 5
; CHECK32-NEXT:    mr 4, 6
; CHECK32-NEXT:    blr
;
; CHECK64-LABEL: rotl_i64_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    rotldi 3, 3, 3
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}

; When first 2 operands match, it's a rotate (by variable amount).

define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK32-LABEL: rotl_i16:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    clrlwi 6, 4, 28
; CHECK32-NEXT:    neg 4, 4
; CHECK32-NEXT:    clrlwi 5, 3, 16
; CHECK32-NEXT:    clrlwi 4, 4, 28
; CHECK32-NEXT:    slw 3, 3, 6
; CHECK32-NEXT:    srw 4, 5, 4
; CHECK32-NEXT:    or 3, 3, 4
; CHECK32-NEXT:    blr
;
; CHECK64-LABEL: rotl_i16:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    neg 5, 4
; CHECK64-NEXT:    clrlwi 6, 3, 16
; CHECK64-NEXT:    clrlwi 4, 4, 28
; CHECK64-NEXT:    clrlwi 5, 5, 28
; CHECK64-NEXT:    slw 3, 3, 4
; CHECK64-NEXT:    srw 4, 6, 5
; CHECK64-NEXT:    or 3, 3, 4
; CHECK64-NEXT:    blr
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlw 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK32_32-LABEL: rotl_i64:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    clrlwi 5, 6, 26
; CHECK32_32-NEXT:    subfic 8, 5, 32
; CHECK32_32-NEXT:    neg 6, 6
; CHECK32_32-NEXT:    slw 7, 3, 5
; CHECK32_32-NEXT:    addi 9, 5, -32
; CHECK32_32-NEXT:    srw 8, 4, 8
; CHECK32_32-NEXT:    clrlwi 6, 6, 26
; CHECK32_32-NEXT:    slw 9, 4, 9
; CHECK32_32-NEXT:    or 7, 7, 8
; CHECK32_32-NEXT:    subfic 8, 6, 32
; CHECK32_32-NEXT:    or 7, 7, 9
; CHECK32_32-NEXT:    addi 9, 6, -32
; CHECK32_32-NEXT:    slw 8, 3, 8
; CHECK32_32-NEXT:    srw 9, 3, 9
; CHECK32_32-NEXT:    srw 3, 3, 6
; CHECK32_32-NEXT:    srw 6, 4, 6
; CHECK32_32-NEXT:    or 6, 6, 8
; CHECK32_32-NEXT:    or 6, 6, 9
; CHECK32_32-NEXT:    slw 4, 4, 5
; CHECK32_32-NEXT:    or 3, 7, 3
; CHECK32_32-NEXT:    or 4, 4, 6
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_i64:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    clrlwi 5, 6, 26
; CHECK32_64-NEXT:    neg 6, 6
; CHECK32_64-NEXT:    subfic 8, 5, 32
; CHECK32_64-NEXT:    slw 7, 3, 5
; CHECK32_64-NEXT:    clrlwi 6, 6, 26
; CHECK32_64-NEXT:    srw 8, 4, 8
; CHECK32_64-NEXT:    addi 9, 5, -32
; CHECK32_64-NEXT:    or 7, 7, 8
; CHECK32_64-NEXT:    subfic 8, 6, 32
; CHECK32_64-NEXT:    slw 5, 4, 5
; CHECK32_64-NEXT:    slw 9, 4, 9
; CHECK32_64-NEXT:    srw 10, 3, 6
; CHECK32_64-NEXT:    srw 4, 4, 6
; CHECK32_64-NEXT:    addi 6, 6, -32
; CHECK32_64-NEXT:    slw 8, 3, 8
; CHECK32_64-NEXT:    srw 3, 3, 6
; CHECK32_64-NEXT:    or 4, 4, 8
; CHECK32_64-NEXT:    or 6, 7, 9
; CHECK32_64-NEXT:    or 4, 4, 3
; CHECK32_64-NEXT:    or 3, 6, 10
; CHECK32_64-NEXT:    or 4, 5, 4
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_i64:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    rotld 3, 3, 4
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.

define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK32_32-LABEL: rotl_v4i32:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlw 3, 3, 7
; CHECK32_32-NEXT:    rotlw 4, 4, 8
; CHECK32_32-NEXT:    rotlw 5, 5, 9
; CHECK32_32-NEXT:    rotlw 6, 6, 10
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_v4i32:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_v4i32:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.

define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
; CHECK32_32-LABEL: rotl_v4i32_const_shift:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlwi 3, 3, 3
; CHECK32_32-NEXT:    rotlwi 4, 4, 3
; CHECK32_32-NEXT:    rotlwi 5, 5, 3
; CHECK32_32-NEXT:    rotlwi 6, 6, 3
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_v4i32_const_shift:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vspltisw 3, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_v4i32_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vspltisw 3, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

; Repeat everything for funnel shift right.

define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 4, 3, 29
; CHECK-NEXT:    rlwimi 4, 3, 5, 0, 26
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 3, 3, 29
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}

; When first 2 operands match, it's a rotate (by variable amount).
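; (Explanatory note, not matched by FileCheck: for an iN value, fshr(x, x, z)
; is the right-rotate (x >> (z mod N)) | (x << (-z mod N)). PowerPC has only
; left-rotate instructions, so the rotr_* lowerings below either feed a
; negated amount to rotlw/rotld or, for the narrow i16 case, expand into
; clrlwi/srw/slw/or sequences as shown in the checks.)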
define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK32-LABEL: rotr_i16:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    clrlwi 6, 4, 28
; CHECK32-NEXT:    neg 4, 4
; CHECK32-NEXT:    clrlwi 5, 3, 16
; CHECK32-NEXT:    clrlwi 4, 4, 28
; CHECK32-NEXT:    srw 5, 5, 6
; CHECK32-NEXT:    slw 3, 3, 4
; CHECK32-NEXT:    or 3, 5, 3
; CHECK32-NEXT:    blr
;
; CHECK64-LABEL: rotr_i16:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    neg 5, 4
; CHECK64-NEXT:    clrlwi 6, 3, 16
; CHECK64-NEXT:    clrlwi 4, 4, 28
; CHECK64-NEXT:    clrlwi 5, 5, 28
; CHECK64-NEXT:    srw 4, 6, 4
; CHECK64-NEXT:    slw 3, 3, 5
; CHECK64-NEXT:    or 3, 4, 3
; CHECK64-NEXT:    blr
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    rotlw 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK32_32-LABEL: rotr_i64:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    clrlwi 5, 6, 26
; CHECK32_32-NEXT:    subfic 8, 5, 32
; CHECK32_32-NEXT:    neg 6, 6
; CHECK32_32-NEXT:    srw 7, 4, 5
; CHECK32_32-NEXT:    addi 9, 5, -32
; CHECK32_32-NEXT:    slw 8, 3, 8
; CHECK32_32-NEXT:    clrlwi 6, 6, 26
; CHECK32_32-NEXT:    srw 9, 3, 9
; CHECK32_32-NEXT:    or 7, 7, 8
; CHECK32_32-NEXT:    subfic 8, 6, 32
; CHECK32_32-NEXT:    or 7, 7, 9
; CHECK32_32-NEXT:    addi 9, 6, -32
; CHECK32_32-NEXT:    srw 8, 4, 8
; CHECK32_32-NEXT:    slw 9, 4, 9
; CHECK32_32-NEXT:    slw 4, 4, 6
; CHECK32_32-NEXT:    slw 6, 3, 6
; CHECK32_32-NEXT:    or 6, 6, 8
; CHECK32_32-NEXT:    or 6, 6, 9
; CHECK32_32-NEXT:    srw 3, 3, 5
; CHECK32_32-NEXT:    or 4, 7, 4
; CHECK32_32-NEXT:    or 3, 3, 6
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_i64:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    clrlwi 5, 6, 26
; CHECK32_64-NEXT:    neg 6, 6
; CHECK32_64-NEXT:    subfic 8, 5, 32
; CHECK32_64-NEXT:    srw 7, 4, 5
; CHECK32_64-NEXT:    clrlwi 6, 6, 26
; CHECK32_64-NEXT:    slw 8, 3, 8
; CHECK32_64-NEXT:    addi 9, 5, -32
; CHECK32_64-NEXT:    or 7, 7, 8
; CHECK32_64-NEXT:    subfic 8, 6, 32
; CHECK32_64-NEXT:    srw 5, 3, 5
; CHECK32_64-NEXT:    srw 9, 3, 9
; CHECK32_64-NEXT:    slw 10, 4, 6
; CHECK32_64-NEXT:    slw 3, 3, 6
; CHECK32_64-NEXT:    addi 6, 6, -32
; CHECK32_64-NEXT:    srw 8, 4, 8
; CHECK32_64-NEXT:    slw 4, 4, 6
; CHECK32_64-NEXT:    or 3, 3, 8
; CHECK32_64-NEXT:    or 6, 7, 9
; CHECK32_64-NEXT:    or 3, 3, 4
; CHECK32_64-NEXT:    or 4, 6, 10
; CHECK32_64-NEXT:    or 3, 5, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_i64:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    neg 4, 4
; CHECK64-NEXT:    rotld 3, 3, 4
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.

define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK32_32-LABEL: rotr_v4i32:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    neg 7, 7
; CHECK32_32-NEXT:    neg 8, 8
; CHECK32_32-NEXT:    neg 9, 9
; CHECK32_32-NEXT:    neg 10, 10
; CHECK32_32-NEXT:    rotlw 3, 3, 7
; CHECK32_32-NEXT:    rotlw 4, 4, 8
; CHECK32_32-NEXT:    rotlw 5, 5, 9
; CHECK32_32-NEXT:    rotlw 6, 6, 10
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_v4i32:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vxor 4, 4, 4
; CHECK32_64-NEXT:    vsubuwm 3, 4, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_v4i32:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    xxlxor 36, 36, 36
; CHECK64-NEXT:    vsubuwm 3, 4, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.
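; (Explanatory note, not matched by FileCheck: a right-rotate by the splat
; constant 3 is the same as a left-rotate by 32 - 3 = 29, which is why the
; scalar lowering below uses rotlwi 29. The Altivec vspltisw immediate is a
; signed 5-bit field, so 29 is materialized as vspltisw 13 minus
; vspltisw -16, i.e. 13 - (-16) = 29, before vrlw.)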
define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; CHECK32_32-LABEL: rotr_v4i32_const_shift:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlwi 3, 3, 29
; CHECK32_32-NEXT:    rotlwi 4, 4, 29
; CHECK32_32-NEXT:    rotlwi 5, 5, 29
; CHECK32_32-NEXT:    rotlwi 6, 6, 29
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_v4i32_const_shift:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vspltisw 3, -16
; CHECK32_64-NEXT:    vspltisw 4, 13
; CHECK32_64-NEXT:    vsubuwm 3, 4, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_v4i32_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vspltisw 3, -16
; CHECK64-NEXT:    vspltisw 4, 13
; CHECK64-NEXT:    vsubuwm 3, 4, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}
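; A minimal sketch (illustrative only; not referenced by the RUN lines or
; FileCheck assertions above, and the function name is made up) of the
; shift/or expansion that a matching-operand fshl replaces. The rotate amount
; is interpreted modulo the bit width, which is why the *_shift_by_bitwidth
; tests above fold to a bare blr.
define i32 @rotl_i32_manual_expansion(i32 %x, i32 %z) {
  ; Reduce the rotate amount modulo 32 and compute the complementary amount.
  %amt = and i32 %z, 31
  %negz = sub i32 0, %z
  %inv = and i32 %negz, 31
  ; (x << amt) | (x >> ((-amt) mod 32)); both shift amounts stay below 32,
  ; and an amount of 0 (e.g. %z == 32) yields x | x == x.
  %hi = shl i32 %x, %amt
  %lo = lshr i32 %x, %inv
  %r = or i32 %hi, %lo
  ret i32 %r
}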