55 changes: 55 additions & 0 deletions cheri/tests/vtable.rs
@@ -0,0 +1,55 @@
#![no_std]

extern crate alloc;
extern crate cheriot;

use alloc::string::String;

trait Chair {
fn count_legs(&self) -> u32;
}

struct Monobloc {
dropme: String,
}

impl Chair for Monobloc {
fn count_legs(&self) -> u32 {
4
}
}

struct CuttyStool {}

impl Chair for CuttyStool {
fn count_legs(&self) -> u32 {
3
}
}

struct RockingChair {}

impl Chair for RockingChair {
fn count_legs(&self) -> u32 {
0
}
}

#[no_mangle]
extern "C" fn test_vtable() -> i32 {
fn observe_chair(chair: &dyn Chair) -> u32 {
chair.count_legs()
}
core::hint::black_box({
let chair = Monobloc { dropme: String::new() };
assert_eq!(observe_chair(&chair), 4);

let chair = CuttyStool {};
assert_eq!(observe_chair(&chair), 3);

let chair = RockingChair {};
assert_eq!(observe_chair(&chair), 0);
});
Collaborator:

What is the black_box() here meant to do? My understanding is that black_box() only affects values that pass through it, so passing it the result of this entire block is unlikely to prevent the contents of the block from being optimised away. That said, I think this should already not be an issue because of the assertions: I'm fairly sure assert_eq!() won't be removed by the compiler, because it has side effects.

My thinking on the block still being optimised is based on this comment in the description of black_box():

> Note that black_box has no effect on how its input is treated, only its output. As such, expressions passed to black_box may still be optimized [...]


0
}
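For reference, the per-value pattern the review comment alludes to: black_box pessimizes only the value that passes through it, so it would wrap the reference that feeds the dynamic call. A hypothetical rewrite of the test body (not part of the PR; reuses the Chair/Monobloc definitions above):

```rust
// Hypothetical rewrite: route the reference through black_box so the
// compiler cannot reason about the concrete type behind the vtable call.
fn test_vtable_blackboxed() -> i32 {
    let chair = Monobloc { dropme: String::new() };
    // black_box affects only its output, so wrap the reference that is
    // actually used for dynamic dispatch.
    let chair: &dyn Chair = core::hint::black_box(&chair);
    assert_eq!(chair.count_legs(), 4);
    0
}
```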
58 changes: 34 additions & 24 deletions compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1440,32 +1440,35 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
) -> &'ll DIType {
let tcx = cx.tcx;

-let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
-let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
-let trait_ref = tcx.erase_and_anonymize_regions(trait_ref);
-
-tcx.vtable_entries(trait_ref)
-} else {
-TyCtxt::COMMON_VTABLE_ENTRIES
-};
+// FIXME(xdoardo): Figure out how to make the overhead of the entry-size computations
+// happen only for targets that need it (i.e. those where sizeof(usize) !=
+// sizeof(ptr)).

// All function pointers are described as opaque pointers. This could be improved in the future
// by describing them as actual function pointers.
let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit);
let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
let usize_di_node = type_di_node(cx, tcx.types.usize);
+let usize_layout = cx.layout_of(tcx.types.usize);
let pointer_layout = cx.layout_of(void_pointer_ty);
let pointer_size = pointer_layout.size;
let pointer_align = pointer_layout.align.abi;
-// If `usize` is not pointer-sized and -aligned then the size and alignment computations
-// for the vtable as a whole would be wrong. Let's make sure this holds even on weird
-// platforms.
-assert_eq!(cx.size_and_align_of(tcx.types.usize), (pointer_size, pointer_align));
+let size_zero = Size::from_bits(0);

+let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+let trait_ref = tcx.erase_and_anonymize_regions(trait_ref);
+
+tcx.vtable_entries(trait_ref)
+} else {
+TyCtxt::COMMON_VTABLE_ENTRIES
+};

let vtable_type_name =
compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::Type);
let unique_type_id = UniqueTypeId::for_vtable_ty(tcx, ty, poly_trait_ref);
-let size = pointer_size * vtable_entries.len() as u64;
+let entries_size = vtable_entries.iter().map(|e| e.memory_size(&tcx)).collect::<Vec<_>>();
+let size = entries_size.iter().fold(size_zero, |a, s| a + *s);
+let mut field_offset = size_zero;

// This gets mapped to a DW_AT_containing_type attribute which allows GDB to correlate
// the vtable to the type it is for.
@@ -1488,32 +1491,39 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
.iter()
.enumerate()
.filter_map(|(index, vtable_entry)| {
-let (field_name, field_type_di_node) = match vtable_entry {
+let (field_name, layout, field_type_di_node) = match vtable_entry {
ty::VtblEntry::MetadataDropInPlace => {
("drop_in_place".to_string(), void_pointer_type_di_node)
("drop_in_place".to_string(), pointer_layout, void_pointer_type_di_node)
}
ty::VtblEntry::Method(_) => {
// Note: This code does not try to give a proper name to each method
// because there might be multiple methods with the same name
// (coming from different traits).
(format!("__method{index}"), void_pointer_type_di_node)
(format!("__method{index}"), pointer_layout, void_pointer_type_di_node)
}
+ty::VtblEntry::TraitVPtr(_) => (
+format!("__super_trait_ptr{index}"),
+pointer_layout,
+void_pointer_type_di_node,
+),
+ty::VtblEntry::MetadataAlign => {
+("align".to_string(), usize_layout, usize_di_node)
+}
-ty::VtblEntry::TraitVPtr(_) => {
-(format!("__super_trait_ptr{index}"), void_pointer_type_di_node)
+ty::VtblEntry::MetadataSize => {
+("size".to_string(), usize_layout, usize_di_node)
}
-ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
-ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
ty::VtblEntry::Vacant => return None,
Collaborator:

I think vacant entries still need to update field_offset for the offsets here to match the calculations in compiler/rustc_codegen_ssa/src/meth.rs and compiler/rustc_middle/src/ty/vtable.rs?
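A sketch of the shape such a fix could take (hypothetical, untested; entries_size and field_offset are the variables introduced in this diff):

```rust
// Hypothetical: advance the running offset for vacant slots too, so the
// offsets of later fields match the layout in meth.rs and vtable.rs.
ty::VtblEntry::Vacant => {
    field_offset += entries_size[index];
    return None;
}
```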

};

-let field_offset = pointer_size * index as u64;
+let current_offset = field_offset;
+field_offset += entries_size[index];

Some(build_field_di_node(
cx,
vtable_type_di_node,
&field_name,
-pointer_layout,
-field_offset,
+layout,
+current_offset,
DIFlags::FlagZero,
field_type_di_node,
None,
14 changes: 13 additions & 1 deletion compiler/rustc_codegen_ssa/src/meth.rs
@@ -28,7 +28,19 @@ impl<'a, 'tcx> VirtualIndex {

let llty = bx.fn_ptr_backend_type(fn_abi);
let ptr_size = bx.data_layout().pointer_size();
-let vtable_byte_offset = self.0 * ptr_size.bytes();
+let usize_size = bx.data_layout().pointer_offset();

+// FIXME(xdoardo) Find a way to remove the overhead for this comparison too?
+let vtable_byte_offset = if ptr_size == usize_size {
+ptr_size.bytes() * self.0
+} else {
+match self.0 {
+0 => 0,
+1 => usize_size.bytes(),
+2 => usize_size.bytes() * 2,
+n => usize_size.bytes() * 2 + ptr_size.bytes() * (n - 2),
Collaborator:

I'm not clear where the magic number two comes from here, I think this needs a comment that explains what's happening. If this calculation is accounting for the entries in TyCtxt::COMMON_VTABLE_ENTRIES then it might also be nice to tie it into those values somehow, so any future changes there won't cause mysterious weirdness.
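One hypothetical way to tie the offsets to the entry kinds rather than hard-coding the count of usize-sized slots, assuming the entry list were available at this point (it currently is not; sketch only, using the memory_size helper this PR adds in rustc_middle):

```rust
// Hypothetical: sum the memory size of every preceding entry, so that
// TyCtxt::COMMON_VTABLE_ENTRIES ([MetadataDropInPlace, MetadataSize,
// MetadataAlign]) stays the single source of truth for slot offsets.
let vtable_byte_offset: u64 = vtable_entries[..self.0 as usize]
    .iter()
    .map(|entry| entry.memory_size(&bx.tcx()).bytes())
    .sum();
```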

+}
+};

load_vtable(bx, llvtable, llty, vtable_byte_offset, ty, nonnull)
}
60 changes: 49 additions & 11 deletions compiler/rustc_middle/src/ty/vtable.rs
@@ -25,6 +25,25 @@ pub enum VtblEntry<'tcx> {
TraitVPtr(TraitRef<'tcx>),
}

+impl<'tcx> VtblEntry<'tcx> {
+/// Return the [`rustc_abi::Size`] needed to represent the given entry kind in memory.
+pub fn memory_size(&self, ctxt: &impl rustc_abi::HasDataLayout) -> rustc_abi::Size {
+let dl = ctxt.data_layout();
+match self {
+VtblEntry::MetadataSize | VtblEntry::MetadataAlign => dl.pointer_offset(),
+VtblEntry::MetadataDropInPlace
+| VtblEntry::Method(_)
+| VtblEntry::TraitVPtr(_)
+| VtblEntry::Vacant => dl.pointer_size(),
+}
+}

+/// Return the [`rustc_abi::Size`] of the data that the given entry kind can contain.
+pub fn data_size(&self, ctxt: &impl rustc_abi::HasDataLayout) -> rustc_abi::Size {
+ctxt.data_layout().pointer_offset()
+}
+}

impl<'tcx> fmt::Debug for VtblEntry<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// We want to call `Display` on `Instance` and `PolyTraitRef`,
@@ -85,6 +104,11 @@ pub(super) fn vtable_allocation_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: (Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>),
) -> AllocId {

+// FIXME(xdoardo): Figure out how to make the overhead of the entry-size computations
+// happen only for targets that need it (i.e. those where sizeof(usize) !=
+// sizeof(ptr)).

let (ty, poly_trait_ref) = key;

let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
@@ -106,19 +130,25 @@ pub(super) fn vtable_allocation_provider<'tcx>(
let size = layout.size.bytes();
let align = layout.align.bytes();

let ptr_size = tcx.data_layout.pointer_size();
-let ptr_capacity = tcx.data_layout.pointer_offset();
+let usize_size = tcx.data_layout.pointer_offset();
let ptr_align = tcx.data_layout.pointer_align().abi;
+let size_zero = rustc_abi::Size::from_bits(0);

-let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap();
+let entries_memory_size =
+vtable_entries.iter().map(|e| e.memory_size(&tcx)).collect::<Vec<_>>();
+let entries_data_size = vtable_entries.iter().map(|e| e.data_size(&tcx)).collect::<Vec<_>>();
+let vtable_size = entries_memory_size.iter().fold(size_zero, |a, s| a + *s);
let mut vtable = Allocation::new(vtable_size, ptr_align, AllocInit::Uninit, ());
+let mut field_offset = size_zero;

-// No need to do any alignment checks on the memory accesses below, because we know the
-// allocation is correctly aligned as we created it above. Also we're only offsetting by
-// multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
+// We must check that the offsetting in steps of `usize`s does not break the alignment
+// requirements of the other entries.
+assert!(
+usize_size.bits() % ptr_align.bits() == 0 || ptr_align.bits() % usize_size.bits() == 0,
+"usize_size: {usize_size:?}, ptr_alignment: {ptr_align:?}"
+);

Collaborator:

I think this assertion is broken: it passes for cases like usize_size = 8 bits, pointer_align = 64 bits, which I don't think is correct.
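A tighter check in the direction the comment points, validating each slot's offset against the alignment it actually needs instead of comparing bit counts (hypothetical sketch, untested; uses the variables from this function):

```rust
// Hypothetical: verify, while walking the entries, that every pointer-sized
// slot starts at a pointer-aligned offset.
let mut offset = rustc_abi::Size::from_bits(0);
for entry in vtable_entries {
    if entry.memory_size(&tcx) == ptr_size {
        assert!(offset.bytes() % ptr_align.bytes() == 0, "misaligned vtable slot at {offset:?}");
    }
    offset += entry.memory_size(&tcx);
}
```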

for (idx, entry) in vtable_entries.iter().enumerate() {
let idx: u64 = u64::try_from(idx).unwrap();
let scalar = match *entry {
VtblEntry::MetadataDropInPlace => {
if ty.needs_drop(tcx, ty::TypingEnv::fully_monomorphized()) {
@@ -130,9 +160,12 @@ pub(super) fn vtable_allocation_provider<'tcx>(
Scalar::from_maybe_pointer(Pointer::null(), &tcx)
}
}
-VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_capacity),
-VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_capacity),
-VtblEntry::Vacant => continue,
+VtblEntry::MetadataSize => Scalar::from_uint(size, usize_size),
+VtblEntry::MetadataAlign => Scalar::from_uint(align, usize_size),
+VtblEntry::Vacant => {
+field_offset += entries_memory_size[idx];
+continue;
+}
VtblEntry::Method(instance) => {
// Prepare the fn ptr we write into the vtable.
let fn_alloc_id = tcx.reserve_and_set_fn_alloc(instance, CTFE_ALLOC_SALT);
@@ -146,8 +179,13 @@ pub(super) fn vtable_allocation_provider<'tcx>(
Scalar::from_pointer(vptr, &tcx)
}
};

+let current_offset = field_offset;
+let field_data_size = entries_data_size[idx];
+field_offset += entries_memory_size[idx];

vtable
-.write_scalar(&tcx, alloc_range(ptr_size * idx, ptr_capacity), scalar)
+.write_scalar(&tcx, alloc_range(current_offset, field_data_size), scalar)
.expect("failed to build vtable representation");
}
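To make the new bookkeeping concrete, a hypothetical walk-through for TyCtxt::COMMON_VTABLE_ENTRIES on a target where pointer_size() is 8 bytes and pointer_offset() is 4 bytes (sizes assumed for illustration only):

```rust
// entries:        [MetadataDropInPlace, MetadataSize, MetadataAlign]
// memory_size():  [8,                   4,            4]
// running offset: [0,                   8,            12]
// vtable_size = 16 bytes; the old `ptr_size * len` computation would
// have reserved 3 * 8 = 24 bytes.
```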

62 changes: 62 additions & 0 deletions tests/codegen-llvm/cheri/debug-vtable.rs
@@ -0,0 +1,62 @@
//@ revisions: cheriot
//@ [cheriot] compile-flags: --target riscv32cheriot-unknown-cheriotrtos -Cdebuginfo=2 -Copt-level=0 -Csymbol-mangling-version=v0
//@ [cheriot] needs-llvm-components: riscv

#![crate_type = "lib"]
#![no_std]

extern crate alloc;
use alloc::boxed::Box;

// cheriot: @ptable
// Force emission of debuginfo for usize and *const () early.
pub static mut XYZ: Option<(usize, *const ())> = None;

pub struct Foo {
dropme: alloc::string::String,
}

pub trait SomeTrait {
fn method1(&self) -> u32;
fn method2(&self) -> u32;
}

impl SomeTrait for Foo {
fn method1(&self) -> u32 {
1
}
fn method2(&self) -> u32 {
2
}
}

pub trait SomeTraitWithGenerics<T, U> {
fn method1(&self) -> (T, U);
}

impl SomeTraitWithGenerics<u64, i8> for Foo {
fn method1(&self) -> (u64, i8) {
(1, 2)
}
}

pub fn foo(x: &Foo) -> (u32, (u64, i8), &dyn Send) {
let y: &dyn SomeTrait = x;
let z: &dyn SomeTraitWithGenerics<u64, i8> = x;
(y.method1(), z.method1(), x as &dyn Send)
}

// Constructing the debuginfo name for the FnOnce vtable below initially caused an ICE on MSVC
// because the trait type contains a late bound region that needed to be erased before the type
// layout for the niche enum `Option<&dyn Fn()>` could be computed.
pub fn bar() -> Box<dyn FnOnce(Option<&dyn Fn()>)> {
Box::new(|_x: Option<&dyn Fn()>| {})
}

fn generic_closure<T: 'static>(x: T) -> Box<dyn FnOnce() -> T> {
Box::new(move || x)
}

pub fn instantiate_generic_closures() -> (Box<dyn FnOnce() -> u32>, Box<dyn FnOnce() -> bool>) {
(generic_closure(1u32), generic_closure(false))
}