// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -msve-vector-bits=512 -fallow-half-arguments-and-returns -S -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

fixed_bool_t global_pred;
fixed_int32_t global_vec;

// CHECK-LABEL: @foo(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
// CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
// CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP4]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[PG]], align 2
// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
// CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP7]], i64 0)
// CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
// CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP6]])
// CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP8]])
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP11]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
//
fixed_int32_t foo(svbool_t pred, svint32_t vec) {
  svbool_t pg = svand_z(pred, global_pred, global_pred);
  return svadd_m(pg, global_vec, vec);
}

// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[GLOBAL_VEC_PTR:%.*]] = alloca <16 x i32>*, align 8
// CHECK-NEXT:    store <16 x i32>* @global_vec, <16 x i32>** [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i32>*, <16 x i32>** [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16
// CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t test_ptr_to_global() {
  fixed_int32_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}

//
// Test casting pointer from fixed-length array to scalable vector.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca <16 x i32>*, align 8
// CHECK-NEXT:    store <16 x i32>* [[ARR:%.*]], <16 x i32>** [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i32>*, <16 x i32>** [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds <16 x i32>, <16 x i32>* [[TMP0]], i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[ARRAYIDX]], align 16
// CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t array_arg(fixed_int32_t arr[]) {
  return arr[0];
}

// CHECK-LABEL: @address_of_array_idx(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i8>, align 2
// CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <8 x i8>], align 2
// CHECK-NEXT:    [[PARR:%.*]] = alloca <8 x i8>*, align 8
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], [3 x <8 x i8>]* [[ARR]], i64 0, i64 0
// CHECK-NEXT:    store <8 x i8>* [[ARRAYIDX]], <8 x i8>** [[PARR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>*, <8 x i8>** [[PARR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2
// CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i8*
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[RETVAL]] to i8*
// CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
//
fixed_bool_t address_of_array_idx() {
  fixed_bool_t arr[3];
  fixed_bool_t *parr;
  parr = &arr[0];
  return *parr;
}
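
// A minimal usage sketch (an illustrative addition, not covered by the
// autogenerated assertions above; the helper name call_foo_example is an
// assumption). Fixed-length types declared with arm_sve_vector_bits convert
// implicitly to and from their sizeless counterparts, so the fixed_int32_t
// global can be passed directly to foo(), whose parameter is svint32_t, and
// foo()'s fixed_int32_t result can be returned unchanged.
fixed_int32_t call_foo_example(void) {
  svbool_t all_true = svptrue_b32(); // scalable all-true predicate
  return foo(all_true, global_vec);  // fixed_int32_t converts to svint32_t
}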