diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 93c01f289d422..becfc60e8e161 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -10,8 +10,8 @@ use tracing::{debug, trace}; use crate::{ AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, - LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, - TargetDataLayout, Variants, WrappingRange, + LayoutData, Niche, NonZeroUsize, NumScalableVectors, Primitive, ReprOptions, Scalar, Size, + StructKind, TagEncoding, TargetDataLayout, Variants, WrappingRange, }; mod coroutine; @@ -204,13 +204,19 @@ impl LayoutCalculator { &self, element: F, count: u64, + number_of_vectors: NumScalableVectors, ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> where FieldIdx: Idx, VariantIdx: Idx, F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug, { - vector_type_layout(SimdVectorKind::Scalable, self.cx.data_layout(), element, count) + vector_type_layout( + SimdVectorKind::Scalable(number_of_vectors), + self.cx.data_layout(), + element, + count, + ) } pub fn simd_type( @@ -1526,7 +1532,7 @@ impl LayoutCalculator { enum SimdVectorKind { /// `#[rustc_scalable_vector]` - Scalable, + Scalable(NumScalableVectors), /// `#[repr(simd, packed)]` PackedFixed, /// `#[repr(simd)]` @@ -1559,9 +1565,10 @@ where let size = elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?; let (repr, align) = match kind { - SimdVectorKind::Scalable => { - (BackendRepr::SimdScalableVector { element, count }, dl.llvmlike_vector_align(size)) - } + SimdVectorKind::Scalable(number_of_vectors) => ( + BackendRepr::SimdScalableVector { element, count, number_of_vectors }, + dl.llvmlike_vector_align(size), + ), // Non-power-of-two vectors have padding up to the next power-of-two. // If we're a packed repr, remove the padding while keeping the alignment as close // to a vector as possible. diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 21ca92d46d1c6..7fa4f3631f81d 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1716,6 +1716,28 @@ impl AddressSpace { pub const ZERO: Self = AddressSpace(0); } +/// How many scalable vectors are in a `BackendRepr::SimdScalableVector`? +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[cfg_attr(feature = "nightly", derive(HashStable_Generic))] +pub struct NumScalableVectors(pub u8); + +impl NumScalableVectors { + /// Returns a `NumScalableVectors` for a non-tuple scalable vector (i.e. a single vector). + pub fn for_non_tuple() -> Self { + NumScalableVectors(1) + } + + /// Returns a `NumScalableVectors` for field counts of two through eight, the valid numbers + /// of fields for a tuple of scalable vectors. `1` is a valid value of `NumScalableVectors`, + /// but not a valid field count for a tuple. + pub fn from_field_count(count: usize) -> Option<Self> { + match count { + 2..=8 => Some(NumScalableVectors(count as u8)), + _ => None, + } + } +} + /// The way we represent values to the backend /// /// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI. @@ -1734,6 +1756,7 @@ pub enum BackendRepr { SimdScalableVector { element: Scalar, count: u64, + number_of_vectors: NumScalableVectors, }, SimdVector { element: Scalar, @@ -1840,8 +1863,12 @@ impl BackendRepr { BackendRepr::SimdVector { element: element.to_union(), count } } BackendRepr::Memory { .. 
} => BackendRepr::Memory { sized: true }, - BackendRepr::SimdScalableVector { element, count } => { - BackendRepr::SimdScalableVector { element: element.to_union(), count } + BackendRepr::SimdScalableVector { element, count, number_of_vectors } => { + BackendRepr::SimdScalableVector { + element: element.to_union(), + count, + number_of_vectors, + } } } } @@ -2181,7 +2208,7 @@ impl LayoutData { } /// Returns `true` if the size of the type is only known at runtime. - pub fn is_runtime_sized(&self) -> bool { + pub fn is_scalable_vector(&self) -> bool { matches!(self.backend_repr, BackendRepr::SimdScalableVector { .. }) } diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 1d5db049f7dc3..05b4079dcc5d3 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -24,7 +24,8 @@ use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ - FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers, + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, + LayoutOfHelpers, TyAndLayout, }; use rustc_middle::ty::{self, AtomicOrdering, Instance, Ty, TyCtxt}; use rustc_span::Span; @@ -943,8 +944,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { .get_address(self.location) } - fn scalable_alloca(&mut self, _elt: u64, _align: Align, _element_ty: Ty<'_>) -> RValue<'gcc> { - todo!() + fn alloca_with_ty(&mut self, ty: TyAndLayout<'tcx>) -> RValue<'gcc> { + self.alloca(ty.layout.size, ty.layout.align.abi) } fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 79cae9e028260..0683166e6a919 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -145,6 +145,10 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { self.const_int(self.type_i32(), i as i64) } + fn const_i64(&self, i: i64) -> RValue<'gcc> { + self.const_int(self.type_i64(), i) + } + fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { self.gcc_int(typ, int) } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 2d91caf40f3c9..ec657678c0d81 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -7,8 +7,7 @@ pub(crate) mod autodiff; pub(crate) mod gpu_offload; use libc::{c_char, c_uint}; -use rustc_abi as abi; -use rustc_abi::{Align, Size, WrappingRange}; +use rustc_abi::{self as abi, Align, Size, WrappingRange}; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; @@ -616,21 +615,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value { + fn alloca_with_ty(&mut self, layout: TyAndLayout<'tcx>) -> Self::Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); - let llvm_ty = match element_ty.kind() { - ty::Bool => bx.type_i1(), - ty::Int(int_ty) => self.cx.type_int_from_ty(*int_ty), - ty::Uint(uint_ty) => 
self.cx.type_uint_from_ty(*uint_ty), - ty::Float(float_ty) => self.cx.type_float_from_ty(*float_ty), - _ => unreachable!("scalable vectors can only contain a bool, int, uint or float"), - }; + let scalable_vector_ty = layout.llvm_type(self.cx); unsafe { - let ty = llvm::LLVMScalableVectorType(llvm_ty, elt.try_into().unwrap()); - let alloca = llvm::LLVMBuildAlloca(&bx.llbuilder, ty, UNNAMED); - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + let alloca = llvm::LLVMBuildAlloca(&bx.llbuilder, scalable_vector_ty, UNNAMED); + llvm::LLVMSetAlignment(alloca, layout.align.abi.bytes() as c_uint); alloca } } diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index a134e97cc8915..dadf8e9e7d5fa 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -159,6 +159,10 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { self.const_int(self.type_i32(), i as i64) } + fn const_i64(&self, i: i64) -> &'ll Value { + self.const_int(self.type_i64(), i) + } + fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { debug_assert!( self.type_kind(t) == TypeKind::Integer, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/dwarf_const.rs b/compiler/rustc_codegen_llvm/src/debuginfo/dwarf_const.rs index 52d04625749b9..1172660af4a29 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/dwarf_const.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/dwarf_const.rs @@ -35,6 +35,14 @@ declare_constant!(DW_OP_plus_uconst: u64); /// Double-checked by a static assertion in `RustWrapper.cpp`. #[allow(non_upper_case_globals)] pub(crate) const DW_OP_LLVM_fragment: u64 = 0x1000; +#[allow(non_upper_case_globals)] +pub(crate) const DW_OP_constu: u64 = 0x10; +#[allow(non_upper_case_globals)] +pub(crate) const DW_OP_minus: u64 = 0x1c; +#[allow(non_upper_case_globals)] +pub(crate) const DW_OP_mul: u64 = 0x1e; +#[allow(non_upper_case_globals)] +pub(crate) const DW_OP_bregx: u64 = 0x92;
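+// +// Together, these opcodes build the DWARF expression that bounds a scalable vector's +// subrange; a sketch of the expression emitted by `build_scalable_vector_di_node` in +// `metadata.rs` below: +// +// DW_OP_constu <elements-per-granule> ; push a constant +// DW_OP_bregx 46, 0 ; push the value of AArch64::VG, plus 0 +// DW_OP_mul ; elements-per-granule * VG +// DW_OP_constu 1 +// DW_OP_minus ; subtract one to get the upper bound // Describes the actual value of a source variable, which might not exist in registers or in memory.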
#[allow(non_upper_case_globals)] pub(crate) const DW_OP_stack_value: u64 = 0x9f; diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 04c0b6953290c..52d0f25ab0e6f 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -3,10 +3,10 @@ use std::fmt::{self, Write}; use std::hash::{Hash, Hasher}; use std::path::PathBuf; use std::sync::Arc; -use std::{iter, ptr}; +use std::{assert_matches::assert_matches, iter, ptr}; use libc::{c_longlong, c_uint}; -use rustc_abi::{Align, Size}; +use rustc_abi::{Align, Layout, NumScalableVectors, Size}; use rustc_codegen_ssa::debuginfo::type_names::{VTableNameKind, cpp_like_debuginfo}; use rustc_codegen_ssa::traits::*; use rustc_hir::def::{CtorKind, DefKind}; @@ -16,12 +16,12 @@ use rustc_middle::ty::layout::{ HasTypingEnv, LayoutOf, TyAndLayout, WIDE_PTR_ADDR, WIDE_PTR_EXTRA, }; use rustc_middle::ty::{ - self, AdtKind, CoroutineArgsExt, ExistentialTraitRef, Instance, Ty, TyCtxt, Visibility, + self, AdtDef, AdtKind, CoroutineArgsExt, ExistentialTraitRef, Instance, Ty, TyCtxt, Visibility, }; use rustc_session::config::{self, DebugInfo, Lto}; use rustc_span::{DUMMY_SP, FileName, RemapPathScopeComponents, SourceFile, Span, Symbol, hygiene}; use rustc_symbol_mangling::typeid_for_trait_ref; -use rustc_target::spec::DebuginfoKind; +use rustc_target::spec::{Arch, DebuginfoKind}; use smallvec::smallvec; use tracing::{debug, instrument}; @@ -33,7 +33,7 @@ use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_na use super::utils::{DIB, debug_context, get_namespace_for_item, is_node_local_to_unit}; use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::type_map::build_type_with_children; -use crate::debuginfo::utils::{WidePtrKind, wide_pointer_kind}; +use crate::debuginfo::utils::{WidePtrKind, create_DIArray, wide_pointer_kind}; use crate::debuginfo::{DIBuilderExt, dwarf_const}; use crate::llvm::debuginfo::{ DIBasicType, DIBuilder, DICompositeType, DIDescriptor, DIFile, DIFlags, DILexicalBlock, @@ -1039,6 +1039,7 @@ fn build_struct_type_di_node<'ll, 'tcx>( span: Span, ) -> DINodeCreationResult<'ll> { let struct_type = unique_type_id.expect_ty(); + let ty::Adt(adt_def, _) = struct_type.kind() else { bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type); }; @@ -1051,6 +1052,21 @@ } else { None }; + let name = compute_debuginfo_type_name(cx.tcx, struct_type, false); + + if struct_type.is_scalable_vector() { + let parts = struct_type.scalable_vector_parts(cx.tcx).unwrap(); + return build_scalable_vector_di_node( + cx, + unique_type_id, + name, + *adt_def, + parts, + struct_type_and_layout.layout, + def_location, + containing_scope, + ); + } type_map::build_type_with_children( cx, @@ -1058,7 +1074,7 @@ cx, Stub::Struct, unique_type_id, - &compute_debuginfo_type_name(cx.tcx, struct_type, false), + &name, def_location, size_and_align_of(struct_type_and_layout), Some(containing_scope), @@ -1101,6 +1117,100 @@ ) } +/// Generate debuginfo for a `#[rustc_scalable_vector]` type. +/// +/// Debuginfo for a scalable vector takes the form of a derived type with a composite base type +/// with `DIFlagVector` that itself has a base type of whatever the element of the scalable vector +/// is. 
The composite type has a subrange from 0 to an expression that calculates the number of +/// elements in the vector. +/// +/// ```text, ignore +/// !1 = !DIDerivedType(tag: DW_TAG_typedef, name: "svint16_t", ..., baseType: !2, ...) +/// !2 = !DICompositeType(tag: DW_TAG_array_type, baseType: !3, ..., flags: DIFlagVector, elements: !4) +/// !3 = !DIBasicType(name: "i16", size: 16, encoding: DW_ATE_signed) +/// !4 = !{!5} +/// !5 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) +/// ``` +/// +/// See the `CGDebugInfo::CreateType(const BuiltinType *BT)` implementation in Clang for how this +/// is generated for C and C++. +fn build_scalable_vector_di_node<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + unique_type_id: UniqueTypeId<'tcx>, + name: String, + adt_def: AdtDef<'tcx>, + (element_count, element_ty, number_of_vectors): (u16, Ty<'tcx>, NumScalableVectors), + layout: Layout<'tcx>, + def_location: Option<DefinitionLocation<'ll>>, + containing_scope: &'ll DIScope, +) -> DINodeCreationResult<'ll> { + use dwarf_const::{DW_OP_bregx, DW_OP_constu, DW_OP_minus, DW_OP_mul}; + assert!(adt_def.repr().scalable()); + // This logic is specific to AArch64 for the moment, but can be extended for other architectures + // later. + assert_matches!(cx.tcx.sess.target.arch, Arch::AArch64); + + let (file_metadata, line_number) = if let Some(def_location) = def_location { + (def_location.0, def_location.1) + } else { + (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER) + }; + + let (bitstride, element_di_node) = if element_ty.is_bool() { + (Some(llvm::LLVMValueAsMetadata(cx.const_i64(1))), type_di_node(cx, cx.tcx.types.u8)) + } else { + (None, type_di_node(cx, element_ty)) + }; + + let number_of_elements: u64 = (element_count as u64) * (number_of_vectors.0 as u64); + let number_of_elements_per_vg = number_of_elements / 2; + let mut expr = smallvec::SmallVec::<[u64; 9]>::new(); + // `($number_of_elements_per_vector_granule * (value_of_register(AArch64::VG) + 0)) - 1` + expr.push(DW_OP_constu); // Push a constant onto the stack + expr.push(number_of_elements_per_vg); + expr.push(DW_OP_bregx); // Push the value of a register + offset on to the stack + expr.push(/* AArch64::VG */ 46u64); + expr.push(0u64); + expr.push(DW_OP_mul); // Multiply top two values on stack + expr.push(DW_OP_constu); // Push a constant onto the stack + expr.push(1u64); + expr.push(DW_OP_minus); // Subtract top two values on stack + + let di_builder = DIB(cx); + let metadata = unsafe { + let upper = llvm::LLVMDIBuilderCreateExpression(di_builder, expr.as_ptr(), expr.len()); + let subrange = llvm::LLVMRustDIGetOrCreateSubrange( + di_builder, + /* CountNode */ None, + llvm::LLVMValueAsMetadata(cx.const_i64(0)), + upper, + /* Stride */ None, + ); + let subscripts = create_DIArray(di_builder, &[Some(subrange)]); + let vector_ty = llvm::LLVMRustDICreateVectorType( + di_builder, + /* Size */ 0, + layout.align.bits() as u32, + element_di_node, + subscripts, + bitstride, + ); + llvm::LLVMDIBuilderCreateTypedef( + di_builder, + vector_ty, + name.as_ptr(), + name.len(), + file_metadata, + line_number, + Some(containing_scope), + layout.align.bits() as u32, + ) + }; + + debug_context(cx).type_map.insert(unique_type_id, metadata); + DINodeCreationResult { di_node: metadata, already_stored_in_typemap: true } +} + //=----------------------------------------------------------------------------- // Tuples //=----------------------------------------------------------------------------- diff --git 
a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index bd90f596eb3f6..4f590f6dd71f0 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -3,7 +3,8 @@ use std::ffi::c_uint; use std::{assert_matches::assert_matches, ptr}; use rustc_abi::{ - Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size, WrappingRange, + Align, BackendRepr, ExternAbi, Float, HasDataLayout, NumScalableVectors, Primitive, Size, + WrappingRange, }; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; @@ -580,6 +581,136 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.pointercast(val, self.type_ptr()) } + sym::sve_cast => { + let Some((in_cnt, in_elem, in_num_vecs)) = + args[0].layout.ty.scalable_vector_parts(self.cx.tcx) + else { + bug!("input parameter to `sve_cast` was not a scalable vector"); + }; + let out_layout = self.layout_of(fn_args.type_at(1)); + let Some((out_cnt, out_elem, out_num_vecs)) = + out_layout.ty.scalable_vector_parts(self.cx.tcx) + else { + bug!("output parameter to `sve_cast` was not a scalable vector"); + }; + assert_eq!(in_cnt, out_cnt); + assert_eq!(in_num_vecs, out_num_vecs); + let out_llty = self.backend_type(out_layout); + match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) { + Some(val) => val, + _ => bug!("could not cast scalable vectors"), + } + } + + sym::sve_tuple_create2 => { + assert_matches!( + self.layout_of(fn_args.type_at(0)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(1), + .. + } + ); + let tuple_ty = self.layout_of(fn_args.type_at(1)); + assert_matches!( + tuple_ty.backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(2), + .. + } + ); + let ret = self.const_poison(self.backend_type(tuple_ty)); + let ret = self.insert_value(ret, args[0].immediate(), 0); + self.insert_value(ret, args[1].immediate(), 1) + } + + sym::sve_tuple_create3 => { + assert_matches!( + self.layout_of(fn_args.type_at(0)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(1), + .. + } + ); + let tuple_ty = self.layout_of(fn_args.type_at(1)); + assert_matches!( + tuple_ty.backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(3), + .. + } + ); + let ret = self.const_poison(self.backend_type(tuple_ty)); + let ret = self.insert_value(ret, args[0].immediate(), 0); + let ret = self.insert_value(ret, args[1].immediate(), 1); + self.insert_value(ret, args[2].immediate(), 2) + } + + sym::sve_tuple_create4 => { + assert_matches!( + self.layout_of(fn_args.type_at(0)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(1), + .. + } + ); + let tuple_ty = self.layout_of(fn_args.type_at(1)); + assert_matches!( + tuple_ty.backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(4), + .. 
+ } + ); + let ret = self.const_poison(self.backend_type(tuple_ty)); + let ret = self.insert_value(ret, args[0].immediate(), 0); + let ret = self.insert_value(ret, args[1].immediate(), 1); + let ret = self.insert_value(ret, args[2].immediate(), 2); + self.insert_value(ret, args[3].immediate(), 3) + } + + sym::sve_tuple_get => { + assert_matches!( + self.layout_of(fn_args.type_at(0)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), + .. + } + ); + assert_matches!( + self.layout_of(fn_args.type_at(1)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(1), + .. + } + ); + self.extract_value( + args[0].immediate(), + fn_args.const_at(2).to_leaf().to_i32() as u64, + ) + } + + sym::sve_tuple_set => { + assert_matches!( + self.layout_of(fn_args.type_at(0)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), + .. + } + ); + assert_matches!( + self.layout_of(fn_args.type_at(1)).backend_repr, + BackendRepr::SimdScalableVector { + number_of_vectors: NumScalableVectors(1), + .. + } + ); + self.insert_value( + args[0].immediate(), + args[1].immediate(), + fn_args.const_at(2).to_leaf().to_i32() as u64, + ) + } + _ if name.as_str().starts_with("simd_") => { // Unpack non-power-of-2 #[repr(packed, simd)] arguments. // This gives them the expected layout of a regular #[repr(simd)] vector. @@ -2637,96 +2768,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( out_len } ); - // casting cares about nominal type, not just structural type - if in_elem == out_elem { - return Ok(args[0].immediate()); - } - - #[derive(Copy, Clone)] - enum Sign { - Unsigned, - Signed, - } - use Sign::*; - - enum Style { - Float, - Int(Sign), - Unsupported, - } - - let (in_style, in_width) = match in_elem.kind() { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. 
- ty::Int(i) => ( - Style::Int(Signed), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(Unsigned), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - let (out_style, out_width) = match out_elem.kind() { - ty::Int(i) => ( - Style::Int(Signed), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(Unsigned), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - - match (in_style, out_style) { - (Style::Int(sign), Style::Int(_)) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => match sign { - Sign::Signed => bx.sext(args[0].immediate(), llret_ty), - Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty), - }, - }); - } - (Style::Int(Sign::Signed), Style::Float) => { - return Ok(bx.sitofp(args[0].immediate(), llret_ty)); - } - (Style::Int(Sign::Unsigned), Style::Float) => { - return Ok(bx.uitofp(args[0].immediate(), llret_ty)); - } - (Style::Float, Style::Int(sign)) => { - return Ok(match (sign, name == sym::simd_as) { - (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty), - (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty), - (_, true) => bx.cast_float_to_int( - matches!(sign, Sign::Signed), - args[0].immediate(), - llret_ty, - ), - }); - } - (Style::Float, Style::Float) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), - }); - } - _ => { /* Unsupported. Fallthrough. */ } + match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) { + Some(val) => return Ok(val), + None => return_error!(InvalidMonomorphization::UnsupportedCast { + span, + name, + in_ty, + in_elem, + ret_ty, + out_elem + }), } - return_error!(InvalidMonomorphization::UnsupportedCast { - span, - name, - in_ty, - in_elem, - ret_ty, - out_elem - }); } macro_rules! arith_binary { ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { @@ -2900,3 +2952,86 @@ fn generic_simd_intrinsic<'ll, 'tcx>( span_bug!(span, "unknown SIMD intrinsic"); } + +/// Implementation of `core::intrinsics::simd_cast`, re-used by +/// `core::intrinsics::simd::scalable::sve_cast`. +fn simd_cast<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + name: Symbol, + args: &[OperandRef<'tcx, &'ll Value>], + llret_ty: &'ll Type, + in_elem: Ty<'tcx>, + out_elem: Ty<'tcx>, +) -> Option<&'ll Value> { + // Casting cares about nominal type, not just structural type + if in_elem == out_elem { + return Some(args[0].immediate()); + } + + #[derive(Copy, Clone)] + enum Sign { + Unsigned, + Signed, + } + use Sign::*; + + enum Style { + Float, + Int(Sign), + Unsupported, + } + + let (in_style, in_width) = match in_elem.kind() { + // vectors of pointer-sized integers should've been + // disallowed before here, so this unwrap is safe. 
+ ty::Int(i) => ( + Style::Int(Signed), + i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Uint(u) => ( + Style::Int(Unsigned), + u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + let (out_style, out_width) = match out_elem.kind() { + ty::Int(i) => ( + Style::Int(Signed), + i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Uint(u) => ( + Style::Int(Unsigned), + u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + + match (in_style, out_style) { + (Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) { + Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => match sign { + Sign::Signed => bx.sext(args[0].immediate(), llret_ty), + Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty), + }, + }), + (Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)), + (Style::Int(Sign::Unsigned), Style::Float) => { + Some(bx.uitofp(args[0].immediate(), llret_ty)) + } + (Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) { + (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty), + (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty), + (_, true) => { + bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty) + } + }), + (Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) { + Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), + }), + _ => None, + } +} diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index f9af42494cada..7ed1e4d29b69b 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -2299,6 +2299,23 @@ unsafe extern "C" { Params: Option<&'a DIArray>, ); + pub(crate) fn LLVMRustDIGetOrCreateSubrange<'a>( + Builder: &DIBuilder<'a>, + CountNode: Option<&'a Metadata>, + LB: &'a Metadata, + UB: &'a Metadata, + Stride: Option<&'a Metadata>, + ) -> &'a Metadata; + + pub(crate) fn LLVMRustDICreateVectorType<'a>( + Builder: &DIBuilder<'a>, + Size: u64, + AlignInBits: u32, + Type: &'a DIType, + Subscripts: &'a DIArray, + BitStride: Option<&'a Metadata>, + ) -> &'a Metadata; + pub(crate) fn LLVMRustDILocationCloneWithBaseDiscriminator<'a>( Location: &'a DILocation, BD: c_uint, diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index e586ed0dd6b07..6d0490e4a1f79 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -24,14 +24,54 @@ fn uncached_llvm_type<'a, 'tcx>( let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } - BackendRepr::SimdScalableVector { ref element, count } => { + BackendRepr::SimdScalableVector { ref element, count, number_of_vectors } => { let element = if element.is_bool() { cx.type_i1() } else { layout.scalar_llvm_type_at(cx, *element) }; - return cx.type_scalable_vector(element, count); + let vector_type = cx.type_scalable_vector(element, count); + return match number_of_vectors.0 { + 1 => vector_type, + 2 => 
cx.type_struct(&[vector_type, vector_type], false), + 3 => cx.type_struct(&[vector_type, vector_type, vector_type], false), + 4 => cx.type_struct(&[vector_type, vector_type, vector_type, vector_type], false), + 5 => cx.type_struct( + &[vector_type, vector_type, vector_type, vector_type, vector_type], + false, + ), + 6 => cx.type_struct( + &[vector_type, vector_type, vector_type, vector_type, vector_type, vector_type], + false, + ), + 7 => cx.type_struct( + &[ + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + ], + false, + ), + 8 => cx.type_struct( + &[ + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + vector_type, + ], + false, + ), + _ => bug!("`#[rustc_scalable_vector]` tuple struct with too many fields"), + }; } BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {} } diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 2f93f688c316d..60ab13dbc6f76 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -438,8 +438,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if operand.layout.ty.is_scalable_vector() && bx.sess().target.arch == rustc_target::spec::Arch::AArch64 { - let (count, element_ty) = - operand.layout.ty.scalable_vector_element_count_and_type(bx.tcx()); + let (count, element_ty, _) = + operand.layout.ty.scalable_vector_parts(bx.tcx()).unwrap(); // i.e. `<vscale x N x i1>` when `N != 16` if element_ty.is_bool() && count != 16 { return; diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index d62e622b6fed3..53518fd816f31 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -1,3 +1,5 @@ +use std::ops::Deref as _; + use rustc_abi::{ Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants, }; @@ -109,8 +111,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> Self { - if layout.is_runtime_sized() { - Self::alloca_runtime_sized(bx, layout) + if layout.deref().is_scalable_vector() { + Self::alloca_scalable(bx, layout) } else { Self::alloca_size(bx, layout.size, layout) } @@ -151,16 +153,11 @@ } } - fn alloca_runtime_sized>( + fn alloca_scalable>( bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> Self { - let (element_count, ty) = layout.ty.scalable_vector_element_count_and_type(bx.tcx()); - PlaceValue::new_sized( - bx.scalable_alloca(element_count as u64, layout.align.abi, ty), - layout.align.abi, - ) - .with_type(layout) + PlaceValue::new_sized(bx.alloca_with_ty(layout), layout.align.abi).with_type(layout) } } diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 05e94b8019f49..5092f28a33f7b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -235,7 +235,7 @@ pub trait BuilderMethods<'a, 'tcx>: fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value; fn alloca(&mut self, size: Size, align: Align) -> Self::Value; - fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value; + fn alloca_with_ty(&mut self, layout: TyAndLayout<'tcx>) -> Self::Value; fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) 
-> Self::Value; fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value; diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index 4178a9742e268..22784a8868ab5 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -20,6 +20,7 @@ pub trait ConstCodegenMethods: BackendTypes { fn const_i8(&self, i: i8) -> Self::Value; fn const_i16(&self, i: i16) -> Self::Value; fn const_i32(&self, i: i32) -> Self::Value; + fn const_i64(&self, i: i64) -> Self::Value; fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; fn const_u8(&self, i: u8) -> Self::Value; fn const_u32(&self, i: u32) -> Self::Value; diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index a1c8c0150a66e..4d8e537340613 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -785,6 +785,13 @@ pub(crate) fn check_intrinsic_type( sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)), sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)), + sym::sve_cast => (2, 0, vec![param(0)], param(1)), + sym::sve_tuple_create2 => (2, 0, vec![param(0), param(0)], param(1)), + sym::sve_tuple_create3 => (2, 0, vec![param(0), param(0), param(0)], param(1)), + sym::sve_tuple_create4 => (2, 0, vec![param(0), param(0), param(0), param(0)], param(1)), + sym::sve_tuple_get => (2, 1, vec![param(0)], param(1)), + sym::sve_tuple_set => (2, 1, vec![param(0), param(1)], param(0)), + sym::atomic_cxchg | sym::atomic_cxchgweak => ( 1, 2, diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index eabc1c94f26e9..46108445ccba0 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -70,6 +70,10 @@ using namespace llvm::object; // This opcode is an LLVM detail that could hypothetically change (?), so // verify that the hard-coded value in `dwarf_const.rs` still agrees with LLVM. static_assert(dwarf::DW_OP_LLVM_fragment == 0x1000); +static_assert(dwarf::DW_OP_constu == 0x10); +static_assert(dwarf::DW_OP_minus == 0x1c); +static_assert(dwarf::DW_OP_mul == 0x1e); +static_assert(dwarf::DW_OP_bregx == 0x92); static_assert(dwarf::DW_OP_stack_value == 0x9f); static LLVM_THREAD_LOCAL char *LastError; @@ -734,7 +738,7 @@ extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints, } template <typename DIT> DIT *unwrapDIPtr(LLVMMetadataRef Ref) { - return (DIT *)(Ref ? unwrap<MDNode>(Ref) : nullptr); + return (DIT *)(Ref ? unwrap(Ref) : nullptr); } #define DIDescriptor DIScope @@ -1210,6 +1214,36 @@ extern "C" void LLVMRustDICompositeTypeReplaceArrays( DINodeArray(unwrap<MDTuple>(Params))); } +// LLVM's C FFI bindings don't expose the overload of `GetOrCreateSubrange` +// which takes a metadata node as the upper bound. +extern "C" LLVMMetadataRef +LLVMRustDIGetOrCreateSubrange(LLVMDIBuilderRef Builder, + LLVMMetadataRef CountNode, LLVMMetadataRef LB, + LLVMMetadataRef UB, LLVMMetadataRef Stride) { + return wrap(unwrap(Builder)->getOrCreateSubrange( + unwrapDI<Metadata>(CountNode), unwrapDI<Metadata>(LB), + unwrapDI<Metadata>(UB), unwrapDI<Metadata>(Stride))); +} + +// LLVM's C FFI bindings don't expose the `BitStride` parameter of +// `createVectorType`. 
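+// (On LLVM versions before 22, `createVectorType` has no `BitStride` parameter, +// so the argument is dropped there; see the `LLVM_VERSION_GE(22, 0)` gate below.)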
+extern "C" LLVMMetadataRef +LLVMRustDICreateVectorType(LLVMDIBuilderRef Builder, uint64_t Size, + uint32_t AlignInBits, LLVMMetadataRef Type, + LLVMMetadataRef Subscripts, + LLVMMetadataRef BitStride) { +#if LLVM_VERSION_GE(22, 0) + return wrap(unwrap(Builder)->createVectorType( + Size, AlignInBits, unwrapDI(Type), + DINodeArray(unwrapDI(Subscripts)), + unwrapDI(BitStride))); +#else + return wrap(unwrap(Builder)->createVectorType( + Size, AlignInBits, unwrapDI(Type), + DINodeArray(unwrapDI(Subscripts)))); +#endif +} + extern "C" LLVMMetadataRef LLVMRustDILocationCloneWithBaseDiscriminator(LLVMMetadataRef Location, unsigned BD) { diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 9c22d158154fb..b0455ff1bc69d 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -7,7 +7,7 @@ use std::debug_assert_matches; use std::ops::{ControlFlow, Range}; use hir::def::{CtorKind, DefKind}; -use rustc_abi::{FIRST_VARIANT, FieldIdx, ScalableElt, VariantIdx}; +use rustc_abi::{FIRST_VARIANT, FieldIdx, NumScalableVectors, ScalableElt, VariantIdx}; use rustc_errors::{ErrorGuaranteed, MultiSpan}; use rustc_hir as hir; use rustc_hir::LangItem; @@ -1261,17 +1261,27 @@ impl<'tcx> Ty<'tcx> { } } - pub fn scalable_vector_element_count_and_type(self, tcx: TyCtxt<'tcx>) -> (u16, Ty<'tcx>) { + pub fn scalable_vector_parts( + self, + tcx: TyCtxt<'tcx>, + ) -> Option<(u16, Ty<'tcx>, NumScalableVectors)> { let Adt(def, args) = self.kind() else { - bug!("`scalable_vector_size_and_type` called on invalid type") + return None; }; - let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else { - bug!("`scalable_vector_size_and_type` called on non-scalable vector type"); + let (num_vectors, vec_def) = match def.repr().scalable? { + ScalableElt::ElementCount(_) => (NumScalableVectors::for_non_tuple(), *def), + ScalableElt::Container => ( + NumScalableVectors::from_field_count(def.non_enum_variant().fields.len())?, + def.non_enum_variant().fields[FieldIdx::ZERO].ty(tcx, args).ty_adt_def()?, + ), }; - let variant = def.non_enum_variant(); + let Some(ScalableElt::ElementCount(element_count)) = vec_def.repr().scalable else { + return None; + }; + let variant = vec_def.non_enum_variant(); assert_eq!(variant.fields.len(), 1); let field_ty = variant.fields[FieldIdx::ZERO].ty(tcx, args); - (element_count, field_ty) + Some((element_count, field_ty, num_vectors)) } pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) { diff --git a/compiler/rustc_public/src/abi.rs b/compiler/rustc_public/src/abi.rs index 1403e57a7e6a9..4a780d652df81 100644 --- a/compiler/rustc_public/src/abi.rs +++ b/compiler/rustc_public/src/abi.rs @@ -232,6 +232,10 @@ pub enum TagEncoding { }, } +/// How many scalable vectors are in a `ValueAbi::ScalableVector`? +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)] +pub struct NumScalableVectors(pub(crate) u8); + /// Describes how values of the type are passed by target ABIs, /// in terms of categories of C types there are ABI rules for. #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)] @@ -245,6 +249,7 @@ pub enum ValueAbi { ScalableVector { element: Scalar, count: u64, + number_of_vectors: NumScalableVectors, }, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. 
diff --git a/compiler/rustc_public/src/unstable/convert/stable/abi.rs b/compiler/rustc_public/src/unstable/convert/stable/abi.rs index b3edc6194c307..d8c4cee7abbe4 100644 --- a/compiler/rustc_public/src/unstable/convert/stable/abi.rs +++ b/compiler/rustc_public/src/unstable/convert/stable/abi.rs @@ -10,8 +10,9 @@ use rustc_target::callconv; use crate::abi::{ AddressSpace, ArgAbi, CallConvention, FieldsShape, FloatLength, FnAbi, IntegerLength, - IntegerType, Layout, LayoutShape, PassMode, Primitive, ReprFlags, ReprOptions, Scalar, - TagEncoding, TyAndLayout, ValueAbi, VariantFields, VariantsShape, WrappingRange, + IntegerType, Layout, LayoutShape, NumScalableVectors, PassMode, Primitive, ReprFlags, + ReprOptions, Scalar, TagEncoding, TyAndLayout, ValueAbi, VariantFields, VariantsShape, + WrappingRange, }; use crate::compiler_interface::BridgeTys; use crate::target::MachineSize as Size; @@ -249,6 +250,18 @@ impl<'tcx> Stable<'tcx> for rustc_abi::TagEncoding { } } +impl<'tcx> Stable<'tcx> for rustc_abi::NumScalableVectors { + type T = NumScalableVectors; + + fn stable<'cx>( + &self, + _tables: &mut Tables<'cx, BridgeTys>, + _cx: &CompilerCtxt<'cx, BridgeTys>, + ) -> Self::T { + NumScalableVectors(self.0) + } +} + impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr { type T = ValueAbi; @@ -265,8 +278,12 @@ impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr { rustc_abi::BackendRepr::SimdVector { element, count } => { ValueAbi::Vector { element: element.stable(tables, cx), count } } - rustc_abi::BackendRepr::SimdScalableVector { element, count } => { - ValueAbi::ScalableVector { element: element.stable(tables, cx), count } + rustc_abi::BackendRepr::SimdScalableVector { element, count, number_of_vectors } => { + ValueAbi::ScalableVector { + element: element.stable(tables, cx), + count, + number_of_vectors: number_of_vectors.stable(tables, cx), + } } rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized }, } diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 257ac3f51c2c1..4f4dcaa705b47 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -1973,6 +1973,12 @@ symbols! 
{ suggestion, super_let, supertrait_item_shadowing, + sve_cast, + sve_tuple_create2, + sve_tuple_create3, + sve_tuple_create4, + sve_tuple_get, + sve_tuple_set, sym, sync, synthetic, diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 391f50edf23fa..136df923ee47a 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -4,8 +4,8 @@ use rustc_abi::Integer::{I8, I32}; use rustc_abi::Primitive::{self, Float, Int, Pointer}; use rustc_abi::{ AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout, - LayoutCalculatorError, LayoutData, Niche, ReprOptions, ScalableElt, Scalar, Size, StructKind, - TagEncoding, VariantIdx, Variants, WrappingRange, + LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, + VariantIdx, Variants, WrappingRange, }; use rustc_hashes::Hash64; use rustc_hir as hir; @@ -572,30 +572,26 @@ fn layout_of_uncached<'tcx>( // ```rust (ignore, example) // #[rustc_scalable_vector(3)] // struct svuint32_t(u32); + // + // #[rustc_scalable_vector] + // struct svuint32x2_t(svuint32_t, svuint32_t); // ``` - ty::Adt(def, args) - if matches!(def.repr().scalable, Some(ScalableElt::ElementCount(..))) => - { - let Some(element_ty) = def - .is_struct() - .then(|| &def.variant(FIRST_VARIANT).fields) - .filter(|fields| fields.len() == 1) - .map(|fields| fields[FieldIdx::ZERO].ty(tcx, args)) + ty::Adt(def, _args) if def.repr().scalable() => { + let Some((element_count, element_ty, number_of_vectors)) = + ty.scalable_vector_parts(tcx) else { let guar = tcx .dcx() - .delayed_bug("#[rustc_scalable_vector] was applied to an invalid type"); - return Err(error(cx, LayoutError::ReferencesError(guar))); - }; - let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else { - let guar = tcx - .dcx() - .delayed_bug("#[rustc_scalable_vector] was applied to an invalid type"); + .delayed_bug("`#[rustc_scalable_vector]` was applied to an invalid type"); return Err(error(cx, LayoutError::ReferencesError(guar))); }; let element_layout = cx.layout_of(element_ty)?; - map_layout(cx.calc.scalable_vector_type(element_layout, element_count as u64))? + map_layout(cx.calc.scalable_vector_type( + element_layout, + element_count as u64, + number_of_vectors, + ))? } // SIMD vector types. diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd/mod.rs similarity index 99% rename from library/core/src/intrinsics/simd.rs rename to library/core/src/intrinsics/simd/mod.rs index 5fb2102c319e2..be378d6e41d2b 100644 --- a/library/core/src/intrinsics/simd.rs +++ b/library/core/src/intrinsics/simd/mod.rs @@ -2,6 +2,8 @@ //! //! In this module, a "vector" is any `repr(simd)` type. +pub mod scalable; + use crate::marker::ConstParamTy; /// Inserts an element into a vector, returning the updated vector. diff --git a/library/core/src/intrinsics/simd/scalable.rs b/library/core/src/intrinsics/simd/scalable.rs new file mode 100644 index 0000000000000..c09c03d15f90f --- /dev/null +++ b/library/core/src/intrinsics/simd/scalable.rs @@ -0,0 +1,97 @@ +//! Scalable vector compiler intrinsics. +//! +//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type. + +/// Numerically casts a vector, elementwise. +/// +/// `T` and `U` must be vectors of integers or floats, and must have the same length. +/// +/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB. 
+/// When casting integers to floats, the result is rounded. +/// Otherwise, truncates or extends the value, maintaining the sign for signed integers. +/// +/// # Safety +/// Casting from integer types is always safe. +/// Casting between two float types is also always safe. +/// +/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`. +/// Specifically, each element must: +/// * Not be `NaN` +/// * Not be infinite +/// * Be representable in the return type, after truncating off its fractional part +#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_cast<T, U>(x: T) -> U; + +/// Creates a tuple of two vectors. +/// +/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a +/// scalable vector (`#[rustc_scalable_vector(N)]`). `SVecTup` must be a tuple of vectors of +/// type `SVec`. +/// +/// Corresponds to Clang's `__builtin_sve_svcreate2*` builtins. +#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_tuple_create2<SVec, SVecTup>(x0: SVec, x1: SVec) -> SVecTup; + +/// Creates a tuple of three vectors. +/// +/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a +/// scalable vector (`#[rustc_scalable_vector(N)]`). `SVecTup` must be a tuple of vectors of +/// type `SVec`. +/// +/// Corresponds to Clang's `__builtin_sve_svcreate3*` builtins. +#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_tuple_create3<SVec, SVecTup>(x0: SVec, x1: SVec, x2: SVec) -> SVecTup; + +/// Creates a tuple of four vectors. +/// +/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a +/// scalable vector (`#[rustc_scalable_vector(N)]`). `SVecTup` must be a tuple of vectors of +/// type `SVec`. +/// +/// Corresponds to Clang's `__builtin_sve_svcreate4*` builtins. +#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_tuple_create4<SVec, SVecTup>(x0: SVec, x1: SVec, x2: SVec, x3: SVec) -> SVecTup; + +/// Gets one vector from a tuple of vectors. +/// +/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a +/// scalable vector (`#[rustc_scalable_vector(N)]`). `SVecTup` must be a tuple of vectors of +/// type `SVec`. +/// +/// Corresponds to Clang's `__builtin_sve_svget*` builtins. +/// +/// # Safety +/// +/// `IDX` must be in bounds of the tuple. +#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_tuple_get<SVecTup, SVec, const IDX: i32>(tuple: SVecTup) -> SVec; + +/// Changes one vector in a tuple of vectors. +/// +/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a +/// scalable vector (`#[rustc_scalable_vector(N)]`). `SVecTup` must be a tuple of vectors of +/// type `SVec`. +/// +/// Corresponds to Clang's `__builtin_sve_svset*` builtins. +/// +/// # Safety +/// +/// `IDX` must be in bounds of the tuple. 
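+/// +/// # Example +/// +/// A minimal sketch of how the tuple intrinsics compose (hypothetical usage, not compiled here; +/// assumes `svint8_t`/`svint8x2_t` definitions like those in the codegen tests below, on an +/// SVE-enabled target): +/// +/// ```ignore (scalable vector types require nightly `rustc_attrs`) +/// let pair: svint8x2_t = unsafe { sve_tuple_create2(a, b) }; +/// let first: svint8_t = unsafe { sve_tuple_get::<svint8x2_t, svint8_t, 0>(pair) }; +/// let updated: svint8x2_t = unsafe { sve_tuple_set::<svint8x2_t, svint8_t, 1>(pair, c) }; +/// ```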
+#[cfg(target_arch = "aarch64")] +#[rustc_intrinsic] +#[rustc_nounwind] +#[target_feature(enable = "sve")] +pub unsafe fn sve_tuple_set<SVecTup, SVec, const IDX: i32>(tuple: SVecTup, x: SVec) -> SVecTup; diff --git a/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x2.rs b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x2.rs new file mode 100644 index 0000000000000..1aaba621d0e10 --- /dev/null +++ b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x2.rs @@ -0,0 +1,149 @@ +//@ only-aarch64 +//@ compile-flags: -Cdebuginfo=2 -Copt-level=0 + +#![crate_type = "lib"] +#![allow(incomplete_features, internal_features)] +#![feature(rustc_attrs)] + +// Test that we generate the correct debuginfo for scalable vector types. + +#[rustc_scalable_vector(16)] +#[allow(non_camel_case_types)] +struct svint8_t(i8); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svint8x2_t(svint8_t, svint8_t); + +#[rustc_scalable_vector(16)] +#[allow(non_camel_case_types)] +struct svuint8_t(u8); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svuint8x2_t(svuint8_t, svuint8_t); + +#[rustc_scalable_vector(8)] +#[allow(non_camel_case_types)] +struct svint16_t(i16); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svint16x2_t(svint16_t, svint16_t); + +#[rustc_scalable_vector(8)] +#[allow(non_camel_case_types)] +struct svuint16_t(u16); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svuint16x2_t(svuint16_t, svuint16_t); + +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +struct svint32_t(i32); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svint32x2_t(svint32_t, svint32_t); + +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +struct svuint32_t(u32); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svuint32x2_t(svuint32_t, svuint32_t); + +#[rustc_scalable_vector(2)] +#[allow(non_camel_case_types)] +struct svint64_t(i64); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svint64x2_t(svint64_t, svint64_t); + +#[rustc_scalable_vector(2)] +#[allow(non_camel_case_types)] +struct svuint64_t(u64); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svuint64x2_t(svuint64_t, svuint64_t); + +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +struct svfloat32_t(f32); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svfloat32x2_t(svfloat32_t, svfloat32_t); + +#[rustc_scalable_vector(2)] +#[allow(non_camel_case_types)] +struct svfloat64_t(f64); + +#[rustc_scalable_vector] +#[allow(non_camel_case_types)] +struct svfloat64x2_t(svfloat64_t, svfloat64_t); + +#[target_feature(enable = "sve")] +pub fn locals() { + // CHECK-DAG: name: "svint8x2_t",{{.*}}, baseType: ![[CT8:[0-9]+]] + // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x2:[0-9]+]]) + // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "i8", size: 8, encoding: DW_ATE_signed) + // CHECK-DAG: ![[ELTS8x2]] = !{![[REALELTS8x2:[0-9]+]]} + // CHECK-DAG: ![[REALELTS8x2]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 16, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + let s8: svint8x2_t; + + // CHECK-DAG: name: "svuint8x2_t",{{.*}}, baseType: ![[CT8:[0-9]+]] + // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x2]]) + // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "u8", size: 8, encoding: 
DW_ATE_unsigned) + let u8: svuint8x2_t; + + // CHECK-DAG: name: "svint16x2_t",{{.*}}, baseType: ![[CT16:[0-9]+]] + // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x2:[0-9]+]]) + // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "i16", size: 16, encoding: DW_ATE_signed) + // CHECK-DAG: ![[ELTS16x2]] = !{![[REALELTS16x2:[0-9]+]]} + // CHECK-DAG: ![[REALELTS16x2]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 8, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + let s16: svint16x2_t; + + // CHECK-DAG: name: "svuint16x2_t",{{.*}}, baseType: ![[CT16:[0-9]+]] + // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x2]]) + // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "u16", size: 16, encoding: DW_ATE_unsigned) + let u16: svuint16x2_t; + + // CHECK-DAG: name: "svint32x2_t",{{.*}}, baseType: ![[CT32:[0-9]+]] + // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x2:[0-9]+]]) + // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "i32", size: 32, encoding: DW_ATE_signed) + // CHECK-DAG: ![[ELTS32x2]] = !{![[REALELTS32x2:[0-9]+]]} + // CHECK-DAG: ![[REALELTS32x2]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + let s32: svint32x2_t; + + // CHECK-DAG: name: "svuint32x2_t",{{.*}}, baseType: ![[CT32:[0-9]+]] + // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x2]]) + // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "u32", size: 32, encoding: DW_ATE_unsigned) + let u32: svuint32x2_t; + + // CHECK-DAG: name: "svint64x2_t",{{.*}}, baseType: ![[CT64:[0-9]+]] + // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x2_64:[0-9]+]]) + // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "i64", size: 64, encoding: DW_ATE_signed) + // CHECK-DAG: ![[ELTS1x2_64]] = !{![[REALELTS1x2_64:[0-9]+]]} + // CHECK-DAG: ![[REALELTS1x2_64]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 2, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + let s64: svint64x2_t; + + // CHECK-DAG: name: "svuint64x2_t",{{.*}}, baseType: ![[CT64:[0-9]+]] + // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x2_64]]) + // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned) + let u64: svuint64x2_t; + + // CHECK: name: "svfloat32x2_t",{{.*}}, baseType: ![[CT32:[0-9]+]] + // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x2]]) + // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "f32", size: 32, encoding: DW_ATE_float) + let f32: svfloat32x2_t; + + // CHECK: name: "svfloat64x2_t",{{.*}}, baseType: ![[CT64:[0-9]+]] + // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x2_64]]) + // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "f64", size: 64, encoding: DW_ATE_float) + let f64: svfloat64x2_t; +} diff --git 
diff --git a/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x3.rs b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x3.rs
new file mode 100644
index 0000000000000..b19051e2c743d
--- /dev/null
+++ b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x3.rs
@@ -0,0 +1,149 @@
+//@ only-aarch64
+//@ compile-flags: -Cdebuginfo=2 -Copt-level=0
+
+#![crate_type = "lib"]
+#![allow(incomplete_features, internal_features)]
+#![feature(rustc_attrs)]
+
+// Test that we generate the correct debuginfo for scalable vector types.
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svint8_t(i8);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint8x3_t(svint8_t, svint8_t, svint8_t);
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svuint8_t(u8);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint8x3_t(svuint8_t, svuint8_t, svuint8_t);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svint16_t(i16);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint16x3_t(svint16_t, svint16_t, svint16_t);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svuint16_t(u16);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint16x3_t(svuint16_t, svuint16_t, svuint16_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svint32_t(i32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint32x3_t(svint32_t, svint32_t, svint32_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svuint32_t(u32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint32x3_t(svuint32_t, svuint32_t, svuint32_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svint64_t(i64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint64x3_t(svint64_t, svint64_t, svint64_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svuint64_t(u64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint64x3_t(svuint64_t, svuint64_t, svuint64_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svfloat32_t(f32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svfloat32x3_t(svfloat32_t, svfloat32_t, svfloat32_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svfloat64_t(f64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svfloat64x3_t(svfloat64_t, svfloat64_t, svfloat64_t);
+
+#[target_feature(enable = "sve")]
+pub fn locals() {
+    // CHECK-DAG: name: "svint8x3_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x3:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "i8", size: 8, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS8x3]] = !{![[REALELTS8x3:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS8x3]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 24, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s8: svint8x3_t;
+
+    // CHECK-DAG: name: "svuint8x3_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x3]])
+    // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "u8", size: 8, encoding: DW_ATE_unsigned)
+    let u8: svuint8x3_t;
+
+    // CHECK-DAG: name: "svint16x3_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x3:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "i16", size: 16, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS16x3]] = !{![[REALELTS16x3:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS16x3]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 12, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s16: svint16x3_t;
+
+    // CHECK-DAG: name: "svuint16x3_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x3]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "u16", size: 16, encoding: DW_ATE_unsigned)
+    let u16: svuint16x3_t;
+
+    // CHECK-DAG: name: "svint32x3_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x3:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "i32", size: 32, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS32x3]] = !{![[REALELTS32x3:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS32x3]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 6, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s32: svint32x3_t;
+
+    // CHECK-DAG: name: "svuint32x3_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x3]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "u32", size: 32, encoding: DW_ATE_unsigned)
+    let u32: svuint32x3_t;
+
+    // CHECK-DAG: name: "svint64x3_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x3_64:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "i64", size: 64, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS1x3_64]] = !{![[REALELTS1x3_64:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS1x3_64]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 3, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s64: svint64x3_t;
+
+    // CHECK-DAG: name: "svuint64x3_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x3_64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+    let u64: svuint64x3_t;
+
+    // CHECK: name: "svfloat32x3_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x3]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "f32", size: 32, encoding: DW_ATE_float)
+    let f32: svfloat32x3_t;
+
+    // CHECK: name: "svfloat64x3_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x3_64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "f64", size: 64, encoding: DW_ATE_float)
+    let f64: svfloat64x3_t;
+}
diff --git a/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x4.rs b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x4.rs
new file mode 100644
index 0000000000000..911af76f42ebb
--- /dev/null
+++ b/tests/codegen-llvm/scalable-vectors/debuginfo-tuples-x4.rs
@@ -0,0 +1,149 @@
+//@ only-aarch64
+//@ compile-flags: -Cdebuginfo=2 -Copt-level=0
+
+#![crate_type = "lib"]
+#![allow(incomplete_features, internal_features)]
+#![feature(rustc_attrs)]
+
+// Test that we generate the correct debuginfo for scalable vector types.
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svint8_t(i8);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint8x4_t(svint8_t, svint8_t, svint8_t, svint8_t);
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svuint8_t(u8);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint8x4_t(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svint16_t(i16);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint16x4_t(svint16_t, svint16_t, svint16_t, svint16_t);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svuint16_t(u16);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint16x4_t(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svint32_t(i32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint32x4_t(svint32_t, svint32_t, svint32_t, svint32_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svuint32_t(u32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint32x4_t(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svint64_t(i64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svint64x4_t(svint64_t, svint64_t, svint64_t, svint64_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svuint64_t(u64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svuint64x4_t(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svfloat32_t(f32);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svfloat32x4_t(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svfloat64_t(f64);
+
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+struct svfloat64x4_t(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
+
+#[target_feature(enable = "sve")]
+pub fn locals() {
+    // CHECK-DAG: name: "svint8x4_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x4:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "i8", size: 8, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS8x4]] = !{![[REALELTS8x4:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS8x4]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 32, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s8: svint8x4_t;
+
+    // CHECK-DAG: name: "svuint8x4_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8x4]])
+    // CHECK-DAG: ![[ELTTY8]] = !DIBasicType(name: "u8", size: 8, encoding: DW_ATE_unsigned)
+    let u8: svuint8x4_t;
+
+    // CHECK-DAG: name: "svint16x4_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x4:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "i16", size: 16, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS16x4]] = !{![[REALELTS16x4:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS16x4]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 16, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s16: svint16x4_t;
+
+    // CHECK-DAG: name: "svuint16x4_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16x4]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "u16", size: 16, encoding: DW_ATE_unsigned)
+    let u16: svuint16x4_t;
+
+    // CHECK-DAG: name: "svint32x4_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x4:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "i32", size: 32, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS32x4]] = !{![[REALELTS32x4:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS32x4]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 8, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s32: svint32x4_t;
+
+    // CHECK-DAG: name: "svuint32x4_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x4]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "u32", size: 32, encoding: DW_ATE_unsigned)
+    let u32: svuint32x4_t;
+
+    // CHECK-DAG: name: "svint64x4_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x4_64:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "i64", size: 64, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS1x4_64]] = !{![[REALELTS1x4_64:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS1x4_64]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s64: svint64x4_t;
+
+    // CHECK-DAG: name: "svuint64x4_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x4_64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+    let u64: svuint64x4_t;
+
+    // CHECK: name: "svfloat32x4_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32x4]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "f32", size: 32, encoding: DW_ATE_float)
+    let f32: svfloat32x4_t;
+
+    // CHECK: name: "svfloat64x4_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS1x4_64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "f64", size: 64, encoding: DW_ATE_float)
+    let f64: svfloat64x4_t;
+}
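Across the x2/x3/x4 tuple tests above, the only thing that changes in the `DISubrange` expression is the leading `DW_OP_constu` operand: for an xN tuple of a vector with E elements per 64-bit granule, the debuginfo encodes K = N × E. A quick sketch of that relationship (the helper name is ours, not part of the tests):

```rust
// K is the `DW_OP_constu` operand checked by the x2/x3/x4 tests:
// the tuple arity times the element count per 64-bit granule.
fn disubrange_constant(vectors: u64, elems_per_granule: u64) -> u64 {
    vectors * elems_per_granule
}

fn main() {
    assert_eq!(disubrange_constant(3, 8), 24); // svint8x3_t:  DW_OP_constu, 24
    assert_eq!(disubrange_constant(4, 4), 16); // svint16x4_t: DW_OP_constu, 16
    assert_eq!(disubrange_constant(2, 1), 2); // svint64x2_t: DW_OP_constu, 2
}
```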
diff --git a/tests/codegen-llvm/scalable-vectors/debuginfo.rs b/tests/codegen-llvm/scalable-vectors/debuginfo.rs
new file mode 100644
index 0000000000000..f4b34a5e1e7eb
--- /dev/null
+++ b/tests/codegen-llvm/scalable-vectors/debuginfo.rs
@@ -0,0 +1,123 @@
+// ignore-tidy-linelength
+//@ only-aarch64
+//@ compile-flags: -Cdebuginfo=2 -Copt-level=0
+//@ revisions: POST-LLVM-22 PRE-LLVM-22
+//@ [PRE-LLVM-22] max-llvm-major-version: 21
+//@ [POST-LLVM-22] min-llvm-version: 22
+
+#![crate_type = "lib"]
+#![allow(incomplete_features, internal_features)]
+#![feature(rustc_attrs)]
+
+// Test that we generate the correct debuginfo for scalable vector types.
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svbool_t(bool);
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svint8_t(i8);
+
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+struct svuint8_t(u8);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svint16_t(i16);
+
+#[rustc_scalable_vector(8)]
+#[allow(non_camel_case_types)]
+struct svuint16_t(u16);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svint32_t(i32);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svuint32_t(u32);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svint64_t(i64);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svuint64_t(u64);
+
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+struct svfloat32_t(f32);
+
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+struct svfloat64_t(f64);
+
+#[target_feature(enable = "sve")]
+pub fn locals() {
+    // CHECK-DAG: name: "svbool_t",{{.*}}, baseType: ![[CT1:[0-9]+]]
+    // PRE-LLVM-22-DAG: ![[CT1]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTYU8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8:[0-9]+]])
+    // POST-LLVM-22-DAG: ![[CT1]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTYU8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8:[0-9]+]], bitStride: i64 1)
+    // CHECK-DAG: ![[ELTTYU8]] = !DIBasicType(name: "u8", size: 8, encoding: DW_ATE_unsigned)
+    // CHECK-DAG: ![[ELTS8]] = !{![[REALELTS8:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS8]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 8, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let b8: svbool_t;
+
+    // CHECK-DAG: name: "svint8_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // CHECK-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTYS8:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8:[0-9]+]])
+    // CHECK-DAG: ![[ELTTYS8]] = !DIBasicType(name: "i8", size: 8, encoding: DW_ATE_signed)
+    let s8: svint8_t;
+
+    // PRE-LLVM-22-DAG: name: "svuint8_t",{{.*}}, baseType: ![[CT1:[0-9]+]]
+    // POST-LLVM-22-DAG: name: "svuint8_t",{{.*}}, baseType: ![[CT8:[0-9]+]]
+    // POST-LLVM-22-DAG: ![[CT8]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTYU8]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS8]])
+    let u8: svuint8_t;
+
+    // CHECK-DAG: name: "svint16_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "i16", size: 16, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS16]] = !{![[REALELTS16:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS16]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s16: svint16_t;
+
+    // CHECK-DAG: name: "svuint16_t",{{.*}}, baseType: ![[CT16:[0-9]+]]
+    // CHECK-DAG: ![[CT16]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY16:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS16]])
+    // CHECK-DAG: ![[ELTTY16]] = !DIBasicType(name: "u16", size: 16, encoding: DW_ATE_unsigned)
+    let u16: svuint16_t;
+
+    // CHECK-DAG: name: "svint32_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "i32", size: 32, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS32]] = !{![[REALELTS32:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS32]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 2, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s32: svint32_t;
+
+    // CHECK-DAG: name: "svuint32_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "u32", size: 32, encoding: DW_ATE_unsigned)
+    let u32: svuint32_t;
+
+    // CHECK-DAG: name: "svint64_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS64:[0-9]+]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "i64", size: 64, encoding: DW_ATE_signed)
+    // CHECK-DAG: ![[ELTS64]] = !{![[REALELTS64:[0-9]+]]}
+    // CHECK-DAG: ![[REALELTS64]] = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 1, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+    let s64: svint64_t;
+
+    // CHECK-DAG: name: "svuint64_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+    let u64: svuint64_t;
+
+    // CHECK: name: "svfloat32_t",{{.*}}, baseType: ![[CT32:[0-9]+]]
+    // CHECK-DAG: ![[CT32]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY32:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS32]])
+    // CHECK-DAG: ![[ELTTY32]] = !DIBasicType(name: "f32", size: 32, encoding: DW_ATE_float)
+    let f32: svfloat32_t;
+
+    // CHECK: name: "svfloat64_t",{{.*}}, baseType: ![[CT64:[0-9]+]]
+    // CHECK-DAG: ![[CT64]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[ELTTY64:[0-9]+]],{{.*}}, flags: DIFlagVector, elements: ![[ELTS64]])
+    // CHECK-DAG: ![[ELTTY64]] = !DIBasicType(name: "f64", size: 64, encoding: DW_ATE_float)
+    let f64: svfloat64_t;
+}
diff --git a/tests/codegen-llvm/scalable-vectors/tuple-intrinsics.rs b/tests/codegen-llvm/scalable-vectors/tuple-intrinsics.rs
new file mode 100644
index 0000000000000..e19fc40cb9d67
--- /dev/null
+++ b/tests/codegen-llvm/scalable-vectors/tuple-intrinsics.rs
@@ -0,0 +1,100 @@
+//@ build-pass
+//@ only-aarch64
+#![crate_type = "lib"]
+#![allow(incomplete_features, internal_features)]
+#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]
+
+// Tests that tuples of scalable vectors are passed as immediates and that the intrinsics for
+// creating/getting/setting tuples of scalable vectors generate the correct LLVM IR.
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector(4)]
+#[allow(non_camel_case_types)]
+pub struct svfloat32_t(f32);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+pub struct svfloat32x2_t(svfloat32_t, svfloat32_t);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+pub struct svfloat32x3_t(svfloat32_t, svfloat32_t, svfloat32_t);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector]
+#[allow(non_camel_case_types)]
+pub struct svfloat32x4_t(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+
+#[inline(never)]
+#[target_feature(enable = "sve")]
+pub fn svdup_n_f32(op: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")]
+        fn _svdup_n_f32(op: f32) -> svfloat32_t;
+    }
+    unsafe { _svdup_n_f32(op) }
+}
+
+// CHECK: define { <vscale x 4 x float>, <vscale x 4 x float> } @svcreate2_f32(<vscale x 4 x float> %x0, <vscale x 4 x float> %x1)
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t {
+    // CHECK: %1 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> %x0, 0
+    // CHECK-NEXT: %2 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } %1, <vscale x 4 x float> %x1, 1
+    unsafe { std::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
+}
+
+// CHECK: define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @svcreate3_f32(<vscale x 4 x float> %x0, <vscale x 4 x float> %x1, <vscale x 4 x float> %x2)
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t {
+    // CHECK-LABEL: @_RNvCsk3YxfLN8zWY_6tuples13svcreate3_f32
+    // CHECK: %1 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> %x0, 0
+    // CHECK-NEXT: %2 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, <vscale x 4 x float> %x1, 1
+    // CHECK-NEXT: %3 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %2, <vscale x 4 x float> %x2, 2
+    unsafe { std::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
+}
+
+// CHECK: define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @svcreate4_f32(<vscale x 4 x float> %x0, <vscale x 4 x float> %x1, <vscale x 4 x float> %x2, <vscale x 4 x float> %x3)
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_f32(
+    x0: svfloat32_t,
+    x1: svfloat32_t,
+    x2: svfloat32_t,
+    x3: svfloat32_t,
+) -> svfloat32x4_t {
+    // CHECK-LABEL: @_RNvCsk3YxfLN8zWY_6tuples13svcreate4_f32
+    // CHECK: %1 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> %x0, 0
+    // CHECK-NEXT: %2 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, <vscale x 4 x float> %x1, 1
+    // CHECK-NEXT: %3 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %2, <vscale x 4 x float> %x2, 2
+    // CHECK-NEXT: %4 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %3, <vscale x 4 x float> %x3, 3
+    unsafe { std::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
+}
+
+// CHECK: define <vscale x 4 x float> @svget2_f32({ <vscale x 4 x float>, <vscale x 4 x float> } %tup)
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub fn svget2_f32<const IDX: u32>(tup: svfloat32x2_t) -> svfloat32_t {
+    // CHECK: %1 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %tup, 0
+    unsafe { std::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IDX }>(tup) }
+}
+
+// CHECK: define { <vscale x 4 x float>, <vscale x 4 x float> } @svset2_f32({ <vscale x 4 x float>, <vscale x 4 x float> } %tup, <vscale x 4 x float> %x)
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub fn svset2_f32<const IDX: u32>(tup: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t {
+    // CHECK: %1 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } %tup, <vscale x 4 x float> %x, 0
+    unsafe { std::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IDX }>(tup, x) }
+}
+
+// This function exists only so there are calls to the generic functions.
+#[target_feature(enable = "sve")]
+pub fn test() {
+    let x = svdup_n_f32(2f32);
+    let tup = svcreate2_f32(x, x);
+    let x = svget2_f32::<0>(tup);
+    let tup = svset2_f32::<0>(tup, x);
+}
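The shape of the test above is also how the intrinsics compose: `sve_tuple_create*` builds the `{ <vscale x 4 x float>, ... }` aggregate, while `sve_tuple_get`/`sve_tuple_set` index it through a const generic, each lowering to a single `extractvalue`/`insertvalue`. A hedged sketch of a caller that swaps the two halves of an x2 tuple, reusing the type and intrinsic declarations from the test above (and assuming the `const IDX: u32` signature reconstructed there; `swap_halves` is an illustrative name, not part of the PR):

```rust
// Sketch only: `svfloat32_t`/`svfloat32x2_t` and the intrinsic paths are
// exactly those of the tuple-intrinsics test; both types are `Copy`, so the
// tuple can be read twice before being reassembled in reverse order.
#[target_feature(enable = "sve")]
pub fn swap_halves(tup: svfloat32x2_t) -> svfloat32x2_t {
    unsafe {
        let lo: svfloat32_t = std::intrinsics::simd::scalable::sve_tuple_get::<_, _, 0>(tup);
        let hi: svfloat32_t = std::intrinsics::simd::scalable::sve_tuple_get::<_, _, 1>(tup);
        std::intrinsics::simd::scalable::sve_tuple_create2(hi, lo)
    }
}
```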
diff --git a/tests/ui/scalable-vectors/cast-intrinsic.rs b/tests/ui/scalable-vectors/cast-intrinsic.rs
new file mode 100644
index 0000000000000..f2157d8bcc14b
--- /dev/null
+++ b/tests/ui/scalable-vectors/cast-intrinsic.rs
@@ -0,0 +1,65 @@
+//@ check-pass
+//@ only-aarch64
+#![crate_type = "lib"]
+#![allow(incomplete_features, internal_features, improper_ctypes)]
+#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]
+
+use std::intrinsics::simd::scalable::sve_cast;
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector(16)]
+#[allow(non_camel_case_types)]
+pub struct svbool_t(bool);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+pub struct svbool2_t(bool);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+pub struct svint64_t(i64);
+
+#[derive(Copy, Clone)]
+#[rustc_scalable_vector(2)]
+#[allow(non_camel_case_types)]
+pub struct nxv2i16(i16);
+
+pub trait SveInto<T>: Sized {
+    unsafe fn sve_into(self) -> T;
+}
+
+impl SveInto<svbool2_t> for svbool_t {
+    #[target_feature(enable = "sve")]
+    unsafe fn sve_into(self) -> svbool2_t {
+        unsafe extern "C" {
+            #[cfg_attr(
+                target_arch = "aarch64",
+                link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv2i1")
+            )]
+            fn convert_from_svbool(b: svbool_t) -> svbool2_t;
+        }
+        unsafe { convert_from_svbool(self) }
+    }
+}
+
+#[target_feature(enable = "sve")]
+pub unsafe fn svld1sh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
+        )]
+        fn _svld1sh_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
+}
diff --git a/tests/ui/simd/masked-load-store-check-fail.stderr b/tests/ui/simd/masked-load-store-check-fail.stderr
index 4e63d04a3b158..037855c8ec9ca 100644
--- a/tests/ui/simd/masked-load-store-check-fail.stderr
+++ b/tests/ui/simd/masked-load-store-check-fail.stderr
@@ -21,7 +21,7 @@ LL | |         Simd::<u8, 4>([9; 4]),
 LL | |     );
    | |_________^
 note: function defined here
-  --> $SRC_DIR/core/src/intrinsics/simd.rs:LL:COL
+  --> $SRC_DIR/core/src/intrinsics/simd/mod.rs:LL:COL
 
 error[E0308]: mismatched types
   --> $DIR/masked-load-store-check-fail.rs:25:13
@@ -46,7 +46,7 @@ LL | |         default,
 LL | |     );
    | |_________^
 note: function defined here
-  --> $SRC_DIR/core/src/intrinsics/simd.rs:LL:COL
+  --> $SRC_DIR/core/src/intrinsics/simd/mod.rs:LL:COL
 
 error: aborting due to 2 previous errors
 
diff --git a/triagebot.toml b/triagebot.toml
index 86192295e0cdb..38ddab99bb130 100644
--- a/triagebot.toml
+++ b/triagebot.toml
@@ -1068,7 +1068,7 @@ instead.
 """
 cc = ["@Amanieu", "@folkertdev", "@sayantn"]
 
-[mentions."library/core/src/intrinsics/simd.rs"]
+[mentions."library/core/src/intrinsics/simd/mod.rs"]
 message = """
 Some changes occurred to the platform-builtins intrinsics. Make sure the
 LLVM backend as well as portable-simd gets adapted for the changes.