; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -instcombine -S | FileCheck %s

; Regression test for a bug in the SLP vectorizer that was causing
; these rotates to be incorrectly combined into a vector rotate.

; The bug fix is at https://reviews.llvm.org/D85759. This test has
; been pre-committed to demonstrate the regressed behavior and provide
; a clear diff for the bug fix.

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
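
; Note: a funnel shift whose first two operands match, e.g.
; @llvm.fshl.i64(%a, %a, %n), is a rotate-left of %a by %n bits, so the
; two scalar calls in @foo are shaped like a pair that SLP could merge
; into a single @llvm.fshl.v2i64.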
define void @foo(<2 x i64> %x, <4 x i32> %y, i64* %out) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[X:%.*]], <2 x i64> [[X]], <2 x i64> [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i64* [[OUT:%.*]] to <2 x i64>*
; CHECK-NEXT:    store <2 x i64> [[TMP3]], <2 x i64>* [[TMP4]], align 8
; CHECK-NEXT:    ret void
;
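; The scalar code below rotates lane 0 of %x by lane 2 of %y (zero-extended
; to i64) and lane 1 of %x by lane 3 of %y, then stores the two results to
; consecutive i64 slots starting at %out.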
  %a = extractelement <2 x i64> %x, i32 0
  %b = extractelement <4 x i32> %y, i32 2
  %conv6 = zext i32 %b to i64
  %c = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %conv6)
  store i64 %c, i64* %out
  %d = extractelement <2 x i64> %x, i32 1
  %e = extractelement <4 x i32> %y, i32 3
  %conv17 = zext i32 %e to i64
  %f = tail call i64 @llvm.fshl.i64(i64 %d, i64 %d, i64 %conv17)
  %arrayidx2 = getelementptr inbounds i64, i64* %out, i32 1
  store i64 %f, i64* %arrayidx2
  ret void
}

declare i64 @llvm.fshl.i64(i64, i64, i64)
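
; "+simd128" enables WebAssembly's 128-bit SIMD; without it the <2 x i64>
; form would not be a plausible target for the vectorizer in the first place.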
attributes #0 = {"target-cpu"="generic" "target-features"="+simd128"}