diff --git a/README.md b/README.md index 2482799..b1feca6 100644 --- a/README.md +++ b/README.md @@ -3,15 +3,31 @@ A Rust library that wraps low-level bindings to the [`lean.h`](https://github.com/leanprover/lean4/blob/master/src/include/lean/lean.h) Lean C library with a high-level API for safe and ergonomic FFI from Lean to -Rust. This allows the user to focus on the actual Rust logic rather than -manually manipulating pointers and keeping track of Lean reference counts. +Rust. This allows the user to focus on the actual Rust logic rather than manual +pointer manipulation and keeping track of Lean reference counts. -The Rust bindings are auto-generated with +The raw Rust bindings are auto-generated with [`rust-bindgen`](https://github.com/rust-lang/rust-bindgen). Bindgen runs in `build.rs` and generates unsafe Rust functions that link to `lean.h`. This module can be found at `target/release/build/lean-ffi-/out/lean.rs` after running `cargo build --release`. +## Features + +- **RAII refcounting** for `LeanOwned` owned references via Rust `Clone` and + `Drop` +- **Lifetime bounds** for `LeanBorrowed` borrowed references to prevent + use-after-free +- **Thread-safe shared references** via `LeanShared` (`lean_mark_mt` + + `Send + Sync`). +- **Typed domain wrappers** e.g. `LeanArray`, `LeanString` etc. with safe Rust + methods +- **`Nat` conversions** via `num-bigint`, handling tagged scalars and big-ints +- **`lean_inductive!` macro** for Lean `structure` / `inductive` types, + generating easy accessor methods without manually tracking byte offsets +- **Safe external objects** via `ExternalClass::register_with_drop::()` and + borrow-bound `LeanExternal::get(&self) -> &T`. + ## Background: Lean Ownership Model In Lean's C API, a **reference** is a `lean_object*` pointer to the header of a @@ -102,107 +118,117 @@ becomes `LeanPoint` in Rust. 
#### Defining custom domain types -Use the `lean_domain_type!` macro to define newtypes for your Lean types: +`lean_inductive!` wraps a Lean `structure` or `inductive` with its full +per-constructor layout. It emits a `#[repr(transparent)]` newtype, a +[`LeanCtorLayout`] impl whose `LAYOUTS` slice has one entry per constructor +(indexed by tag), and typed accessors that bounds-check against the current +variant's layout: + +- `alloc(tag: u8)` — `lean_alloc_ctor` for a specific variant. +- `get_obj(i)` / `set_obj(i, val)` — object fields. +- `get_usize(i)` / `set_usize(i, val)` — `USize` fields. +- `get_num_{64,32,16,8}(i)` / `set_num_{64,32,16,8}(i, val)` — scalar fields. + +Variant layouts are listed inside `[ … ]`, in tag order. + +**Structure** — from `structure Point where x : Nat; y : Nat`: ```rust -lean_ffi::lean_domain_type! { - /// Lean `Point` — structure Point where x : Nat; y : Nat - LeanPoint; - /// Lean `PutResponse` — structure PutResponse where message : String; hash : String - LeanPutResponse; -} +lean_ffi::lean_inductive! { LeanPoint [ { num_obj: 2 } ] } + +let p = LeanPoint::alloc(0); +p.set_obj(0, x_nat); +p.set_obj(1, y_nat); ``` -This generates a `#[repr(transparent)]` wrapper with `Clone`, `Copy` for -`LeanBorrowed`, `self.inner()`, `self.as_raw()`, `self.into_raw()`, and `From` -impls. You can then add accessor methods — readers are generic over `R: LeanRef` -(work on both owned and borrowed), constructors return `LeanOwned`: +**Inductive** — from: + +```lean +inductive CompareResult + | matched + | mismatch (leanSize rustSize : UInt64) + | notFound +``` ```rust -impl LeanPutResponse { - pub fn message(&self) -> LeanBorrowed<'_> { - self.as_ctor().get(0) // borrowed ref into the object, no lean_inc - } - pub fn hash(&self) -> LeanBorrowed<'_> { - self.as_ctor().get(1) - } +lean_ffi::lean_inductive! 
{ + LeanCompareResult [ + { }, // matched + { num_64: 2 }, // mismatch + { }, // notFound + ] } -impl LeanPutResponse { - pub fn mk(message: &str, hash: &str) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, LeanString::new(message)); - ctor.set(1, LeanString::new(hash)); - Self::new(ctor.into()) +// Build: +let m = LeanCompareResult::alloc(1); +m.set_num_64(0, lean_size); +m.set_num_64(1, rust_size); + +// Read — dispatch on the Lean tag, then access fields: +match result.as_ctor().tag() { + 1 => { + let (l, r) = (result.get_num_64(0), result.get_num_64(1)); } + _ => { /* matched | notFound */ } } ``` -### Inductive types and field layout +For wrappers without a ctor layout (opaque externals, types represented as +`lean_box(n)`, etc.) use the lower-level `lean_domain_type!` macro. + +[`LeanCtorLayout`]: https://docs.rs/lean-ffi/latest/lean_ffi/object/trait.LeanCtorLayout.html + +### Constructor field layout -Extra care must be taken when dealing with -[inductive types](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) -as the runtime memory layout of constructor fields may not match the declaration -order in Lean. Fields are reordered into three groups: +Lean +[reorders constructor fields](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) +at runtime. Declaration order does **not** match memory order. For every +constructor, fields are laid out in this order: -1. Non-scalar fields (`lean_object*`), in declaration order -2. `USize` fields, in declaration order -3. Other scalar fields, in decreasing order by size, then declaration order - within each size +1. Object fields (`lean_object*`) — declaration order. +2. `USize` fields — declaration order. +3. Fixed-size scalars — descending size (8B → 4B → 2B → 1B), then declaration + order within each size. 
-This means a structure like +So for ```lean structure MyStruct where - u8val : UInt8 - obj : Nat + u8val : UInt8 + obj : Nat u32val : UInt32 u64val : UInt64 ``` -would be laid out as `[obj, u64val, u32val, u8val]` at runtime. Trivial wrapper -types (e.g. `Char` wraps `UInt32`) count as their underlying scalar type. +the runtime order is `[obj, u64val, u32val, u8val]`. Trivial wrappers (e.g. +`Char` over `UInt32`) count as their underlying scalar. -A constructor's memory looks like: +Memory: ``` -[header (8B)] [object fields (8B each)] [USize fields (8B each)] [scalar data area] +[header 8B] [object fields, 8B each] [USize fields, 8B each] [scalar bytes, descending size] ``` -Object fields and USize fields each occupy 8-byte slots. The scalar data area is -a flat region of bytes containing all remaining scalar field values, packed by -descending size. For `MyStruct` (1 object field, 0 USize fields, 13 scalar -bytes): +For `MyStruct` (`num_obj=1`, `num_usize=0`, `num_64=1`, `num_32=1`, `num_8=1`): -- `u64val` occupies bytes 0–7 of the scalar area -- `u32val` occupies bytes 8–11 -- `u8val` occupies byte 12 +- `u64val` at scalar bytes 0–7 +- `u32val` at scalar bytes 8–11 +- `u8val` at scalar byte 12 -Use `LeanCtor` to access fields at the correct positions. Scalar getters and -setters take `(num_slots, byte_offset)` — `num_slots` is the total number of -8-byte slots (object fields + USize fields) preceding the scalar data area, and -`byte_offset` is the position of the field within that area. +`lean_inductive!` takes the per-size field counts and hands you size-indexed +accessors — `get_num_64(0)` for the first 8-byte scalar, `get_num_8(0)` for the +first 1-byte scalar, etc. 
No hand-rolled byte offsets: ```rust -impl LeanScalarStruct { - pub fn obj(&self) -> LeanBorrowed<'_> { self.as_ctor().get(0) } - pub fn u64val(&self) -> u64 { self.as_ctor().get_u64(1, 0) } - pub fn u32val(&self) -> u32 { self.as_ctor().get_u32(1, 8) } - pub fn u8val(&self) -> u8 { self.as_ctor().get_u8(1, 12) } -} - -impl LeanScalarStruct { - pub fn mk(obj: LeanNat, u64val: u64, u32val: u32, u8val: u8) -> Self { - let ctor = LeanCtor::alloc(0, 1, 13); // tag 0, 1 obj field, 13 scalar bytes - ctor.set(0, obj); // object field 0 - ctor.set_u64(1, 0, u64val); // 1 slot before scalars, byte 0 - ctor.set_u32(1, 8, u32val); // 1 slot before scalars, byte 8 - ctor.set_u8(1, 12, u8val); // 1 slot before scalars, byte 12 - Self::new(ctor.into()) - } +lean_ffi::lean_inductive! { + LeanMyStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] } ``` +For raw access (non-standard layouts, hand-tuned code), `LeanCtor` exposes +`get_u{8,16,32,64}(offset)` / `set_u{8,16,32,64}(offset, val)` with absolute +byte offsets matching `lean_ctor_get_uint*` / `lean_ctor_set_uint*`. + ### External objects (`LeanExternal`) External objects let you store arbitrary Rust data inside a Lean object. 
Lean diff --git a/Tests/FFI.lean b/Tests/FFI.lean index d43a1f2..f131355 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -64,6 +64,28 @@ opaque roundtripExtScalarStruct : @& ExtScalarStruct → ExtScalarStruct @[extern "rs_roundtrip_usize_struct"] opaque roundtripUSizeStruct : @& USizeStruct → USizeStruct +@[extern "rs_roundtrip_usize_mixed_struct"] +opaque roundtripUSizeMixedStruct : @& USizeMixedStruct → USizeMixedStruct + +@[extern "rs_roundtrip_bool_struct"] +opaque roundtripBoolStruct : @& BoolStruct → BoolStruct + + +@[extern "rs_roundtrip_multi_u32_struct"] +opaque roundtripMultiU32Struct : @& MultiU32Struct → MultiU32Struct + +@[extern "rs_roundtrip_test_inductive"] +opaque roundtripTestInductive : @& TestInductive → TestInductive + +@[extern "rs_roundtrip_outer"] +opaque roundtripOuter : @& Outer → Outer + +@[extern "rs_roundtrip_inductive_holder"] +opaque roundtripInductiveHolder : @& InductiveHolder → InductiveHolder + +@[extern "rs_roundtrip_struct_in_variant"] +opaque roundtripStructInVariant : @& StructInVariant → StructInVariant + @[extern "rs_roundtrip_float"] opaque roundtripFloat : Float → Float @@ -359,6 +381,34 @@ def borrowedRoundtripTests : TestSeq := test "ExtScalarStruct max" (show Bool from roundtripExtScalarStruct ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩ == ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩) ++ test "USizeStruct zeros" (roundtripUSizeStruct ⟨0, 0, 0⟩ == ⟨0, 0, 0⟩) ++ test "USizeStruct mixed" (roundtripUSizeStruct ⟨42, 99, 255⟩ == ⟨42, 99, 255⟩) ++ + -- USizeMixedStruct declaration order: obj, u64val, bval, uval, u32val + test "USizeMixedStruct zeros" (roundtripUSizeMixedStruct ⟨0, 0, false, 0, 0⟩ == ⟨0, 0, false, 0, 0⟩) ++ + test "USizeMixedStruct mixed" (roundtripUSizeMixedStruct ⟨42, 0xFFFFFFFFFFFFFFFF, true, 99, 0xFFFFFFFF⟩ == ⟨42, 0xFFFFFFFFFFFFFFFF, true, 99, 0xFFFFFFFF⟩) ++ + test "BoolStruct all false" (roundtripBoolStruct ⟨0, false, false, false⟩ == ⟨0, false, false, false⟩) ++ + 
test "BoolStruct all true" (roundtripBoolStruct ⟨42, true, true, true⟩ == ⟨42, true, true, true⟩) ++ + test "BoolStruct mixed" (roundtripBoolStruct ⟨7, true, false, true⟩ == ⟨7, true, false, true⟩) ++ + test "MultiU32Struct zeros" (roundtripMultiU32Struct ⟨0, 0, 0, 0⟩ == ⟨0, 0, 0, 0⟩) ++ + test "MultiU32Struct max" (roundtripMultiU32Struct ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩ == ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩) ++ + test "MultiU32Struct mixed" (roundtripMultiU32Struct ⟨1, 42, 0, 99⟩ == ⟨1, 42, 0, 99⟩) ++ + -- TestInductive: multi-variant inductive with non-zero tags + test "TestInductive empty" (roundtripTestInductive .empty == .empty) ++ + test "TestInductive withScalars" (roundtripTestInductive (.withScalars 42 99) == .withScalars 42 99) ++ + test "TestInductive withScalars max" (roundtripTestInductive (.withScalars 0xFFFFFFFFFFFFFFFF 0) == .withScalars 0xFFFFFFFFFFFFFFFF 0) ++ + test "TestInductive withMixed" (roundtripTestInductive (.withMixed 7 0xFFFFFFFF true) == .withMixed 7 0xFFFFFFFF true) ++ + test "TestInductive withMixed false" (roundtripTestInductive (.withMixed 0 0 false) == .withMixed 0 0 false) ++ + -- Outer: structure containing another structure + test "Outer zeros" (roundtripOuter ⟨⟨0, 0, 0, 0⟩, "", 0⟩ == ⟨⟨0, 0, 0, 0⟩, "", 0⟩) ++ + test "Outer mixed" (roundtripOuter ⟨⟨42, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩, "hello", 99⟩ == ⟨⟨42, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩, "hello", 99⟩) ++ + -- InductiveHolder: structure containing an inductive + test "InductiveHolder empty" (roundtripInductiveHolder ⟨.empty, 0⟩ == ⟨.empty, 0⟩) ++ + test "InductiveHolder withScalars" (roundtripInductiveHolder ⟨.withScalars 10 20, 42⟩ == ⟨.withScalars 10 20, 42⟩) ++ + test "InductiveHolder withMixed" (roundtripInductiveHolder ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩ == ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩) ++ + -- StructInVariant: inductive whose variants contain structures + test "StructInVariant empty" (roundtripStructInVariant .empty == .empty) ++ + test 
"StructInVariant withPoint zeros" (roundtripStructInVariant (.withPoint ⟨0, 0⟩) == .withPoint ⟨0, 0⟩) ++ + test "StructInVariant withPoint mixed" (roundtripStructInVariant (.withPoint ⟨42, 99⟩) == .withPoint ⟨42, 99⟩) ++ + test "StructInVariant withScalar zeros" (roundtripStructInVariant (.withScalar ⟨0, 0, 0, 0⟩ 0) == .withScalar ⟨0, 0, 0, 0⟩ 0) ++ + test "StructInVariant withScalar max" (roundtripStructInVariant (.withScalar ⟨1, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩ 0xFFFFFFFF) == .withScalar ⟨1, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩ 0xFFFFFFFF) ++ test "External all fields" (externalAllFields (mkRustData 42 99 "hello") == "42:99:hello") ++ test "External all fields zeros" (externalAllFields (mkRustData 0 0 "") == "0:0:") ++ test "External large u64" (rustDataGetX (mkRustData 0xFFFFFFFFFFFFFFFF 0 "test") == 0xFFFFFFFFFFFFFFFF) ++ diff --git a/Tests/Gen.lean b/Tests/Gen.lean index 59337e8..6fcdfbe 100644 --- a/Tests/Gen.lean +++ b/Tests/Gen.lean @@ -122,6 +122,74 @@ structure USizeStruct where u8val : UInt8 deriving Repr, BEq, DecidableEq, Inhabited +/-- Structure mixing USize with other scalar types. + Declaration order deliberately differs from memory layout to test + that FFI code handles Lean's field reordering correctly. + Declaration: obj, u64val, bval, uval, u32val + Memory layout: [obj] [uval (usize slot)] [u64val @ +0] [u32val @ +8] [bval @ +12] + Total scalar size = 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21. -/ +structure USizeMixedStruct where + obj : Nat + u64val : UInt64 + bval : Bool + uval : USize + u32val : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure with multiple Bool scalar fields for batch-read/write testing. + Layout: 1 object field (obj : Nat), then 3 Bool scalars. -/ +structure BoolStruct where + obj : Nat + b1 : Bool + b2 : Bool + b3 : Bool +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure with multiple UInt32 scalar fields for batch-read/write testing. 
+ Layout: 1 object field (obj : Nat), then 3 UInt32 scalars. -/ +structure MultiU32Struct where + obj : Nat + a : UInt32 + b : UInt32 + c : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Inductive with multiple constructors for testing non-zero TAG. + - empty: tag 0, no fields (scalar representation) + - withScalars: tag 1, 2 u64 fields + - withMixed: tag 2, 1 obj field + 1 u32 + 1 bool -/ +inductive TestInductive + | empty + | withScalars (a b : UInt64) + | withMixed (obj : Nat) (x : UInt32) (flag : Bool) +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure containing another structure as an object field. + Tests nested structure access via the trait API. -/ +structure Outer where + inner : ScalarStruct + label : String + count : UInt64 +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure containing an inductive as an object field. -/ +structure InductiveHolder where + value : TestInductive + tag_copy : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Inductive whose variants carry structure-typed fields. + Tests that variant accessors work when the variant's field is itself a + ctor-backed Lean structure. + - empty: tag 0, no fields (scalar representation) + - withPoint: tag 1, 1 object field (a `Point`) + - withScalar: tag 2, 1 object field (a `ScalarStruct`) + 1 UInt32 -/ +inductive StructInVariant + | empty + | withPoint (p : Point) + | withScalar (s : ScalarStruct) (extra : UInt32) +deriving Repr, BEq, DecidableEq, Inhabited + /-! ## Shrinkable instances -/ instance : Shrinkable Nat where diff --git a/src/lib.rs b/src/lib.rs index bfc274c..58a525a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -54,19 +54,23 @@ pub fn inc_heartbeat() { /// Must only be used as a `lean_external_foreach_fn` callback. 
pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_object) {} -/// Generate a `#[repr(transparent)]` newtype over a `LeanRef` type parameter -/// for a specific Lean type, with Clone, conditional Copy, from_raw, into_raw, and From impls. +/// Declare a `#[repr(transparent)]` wrapper `Ty` for a Lean domain +/// type, with `inner`, `as_raw`, `as_ctor`, `from_ctor`, `new`, `into_raw`, +/// `Clone`, conditional `Copy`, and `From> for LeanOwned`. /// -/// # Naming convention +/// This is the low-level primitive for wrappers that are **not** ctor-backed +/// — opaque externals, types represented by a tagged scalar (`lean_box(n)`), +/// or wrappers whose layout is attached from a different module. For Lean +/// `structure` / `inductive` types, use [`lean_inductive!`] instead; it calls +/// this macro and also attaches the layout and typed accessors. /// -/// Domain types should be prefixed with `Lean` to distinguish them from Lean-side types -/// and to match the built-in types (`LeanArray`, `LeanString`, `LeanNat`, etc.). -/// For example, a Lean `Point` structure becomes `LeanPoint` in Rust: +/// Wrapper names are prefixed with `Lean` to match the built-ins (`LeanArray`, +/// `LeanString`, `LeanNat`, …). /// /// ```ignore /// lean_domain_type! { -/// /// Lean `Point` — structure Point where x : Nat; y : Nat -/// LeanPoint; +/// /// Rust handle wrapped as an opaque Lean `@[extern_type]`. +/// LeanRustData; /// } /// ``` #[macro_export] @@ -99,6 +103,14 @@ macro_rules! lean_domain_type { } } + impl<'a> $name<$crate::object::LeanBorrowed<'a>> { + /// Wrap a borrowed `LeanCtor` as this domain type. + #[inline] + pub fn from_ctor(ctor: $crate::object::LeanCtor<$crate::object::LeanBorrowed<'a>>) -> Self { + Self(unsafe { $crate::object::LeanBorrowed::from_raw(ctor.as_raw()) }) + } + } + impl $name<$crate::object::LeanOwned> { /// Wrap an owned `LeanOwned` value. #[inline] @@ -125,3 +137,211 @@ macro_rules! 
lean_domain_type { } )*}; } + +/// Declare a wrapper for a Lean `structure` or `inductive` with its full +/// per-constructor layout. +/// +/// Emits a [`lean_domain_type!`] wrapper, an [`LeanCtorLayout`](object::LeanCtorLayout) +/// impl whose `LAYOUTS` slice has one entry per Lean constructor (indexed by +/// tag), plus these inherent methods: +/// +/// - `alloc(tag: u8)` — `lean_alloc_ctor` for the given variant. +/// - `get_obj(i)` / `set_obj(i, val)` +/// - `get_usize(i)` / `set_usize(i, val)` +/// - `get_num_{64,32,16,8}(i)` / `set_num_{64,32,16,8}(i, val)` +/// +/// Accessors read the object's actual tag from its header, look up +/// `LAYOUTS[tag]`, bounds-check the field index against that variant's +/// counts, and compute byte offsets. Within each scalar size (8B / 4B / 2B / +/// 1B), field indices follow Lean's declaration order. +/// +/// A `structure` is the one-variant case. Typical call, using the `Point` +/// from `structure Point where x : Nat; y : Nat`: +/// +/// ```ignore +/// lean_inductive! { +/// /// Lean `Point` — see `Tests/Gen.lean`. +/// LeanPoint [ { num_obj: 2 } ] +/// } +/// +/// let p = LeanPoint::alloc(0); +/// p.set_obj(0, x_nat); +/// p.set_obj(1, y_nat); +/// ``` +/// +/// Multi-variant — wrapping this Lean inductive: +/// +/// ```ignore +/// // Lean side: +/// // inductive BlockCompareResult +/// // | matched +/// // | mismatch (leanSize rustSize firstDiff : UInt64) +/// // | notFound +/// +/// lean_inductive! 
{ +/// LeanBlockCompareResult [ +/// { }, // matched +/// { num_64: 3 }, // mismatch +/// { }, // notFound +/// ] +/// } +/// +/// // Build: +/// let m = LeanBlockCompareResult::alloc(1); +/// m.set_num_64(0, lean_size); +/// m.set_num_64(1, rust_size); +/// m.set_num_64(2, first_diff); +/// +/// // Read: +/// match result.as_ctor().tag() { +/// 1 => { +/// let (l, r, d) = (result.get_num_64(0), result.get_num_64(1), result.get_num_64(2)); +/// } +/// _ => { /* matched | notFound */ } +/// } +/// ``` +/// +/// `alloc(tag)` and every accessor panic if the index is out of range for +/// the current variant. +/// +/// Multiple types can share one invocation, separated by `;`: +/// +/// ```ignore +/// lean_inductive! { +/// LeanPoint [ { num_obj: 2 } ]; +/// LeanNatTree [ +/// { num_obj: 1 }, // leaf +/// { num_obj: 2 }, // node +/// ]; +/// } +/// ``` +#[macro_export] +macro_rules! lean_inductive { + ( + $( + $(#[$top_meta:meta])* + $top:ident [ + $( { $($key:ident : $val:expr),* $(,)? } ),+ $(,)? + ] + );+ $(;)? + ) => { + $( + $crate::lean_domain_type! { $(#[$top_meta])* $top; } + + impl $crate::object::LeanCtorLayout for $top { + const LAYOUTS: &'static [$crate::object::SingleCtorLayout] = &[ + $( + $crate::object::SingleCtorLayout { + $($key: $val,)* + ..$crate::object::SingleCtorLayout::ZERO + }, + )+ + ]; + } + + impl $top { + /// Layout of the variant this object currently holds (read from the + /// ctor's tag in its object header). + #[doc(hidden)] + #[inline] + fn __variant_layout(&self) -> $crate::object::SingleCtorLayout { + let tag = self.as_ctor().tag() as usize; + ::LAYOUTS[tag] + } + + /// Get a borrowed reference to the `i`-th object field. 
+ pub fn get_obj(&self, i: usize) -> $crate::object::LeanBorrowed<'_> { + let l = self.__variant_layout(); + assert!(i < l.num_obj, "object field {i} out of bounds (num_obj = {})", l.num_obj); + let raw = unsafe { + $crate::include::lean_ctor_get(self.as_raw(), $crate::object::to_u32(i)) + }; + unsafe { $crate::object::LeanBorrowed::from_raw(raw) } + } + + /// Set the `i`-th object field. Takes ownership of `val`. + pub fn set_obj(&self, i: usize, val: impl Into<$crate::object::LeanOwned>) { + let l = self.__variant_layout(); + assert!(i < l.num_obj, "object field {i} out of bounds (num_obj = {})", l.num_obj); + let val: $crate::object::LeanOwned = val.into(); + unsafe { + $crate::include::lean_ctor_set( + self.as_raw(), + $crate::object::to_u32(i), + val.into_raw(), + ); + } + } + + pub fn get_usize(&self, i: usize) -> usize { + let l = self.__variant_layout(); + assert!(i < l.num_usize, "USize field {i} out of bounds (num_usize = {})", l.num_usize); + self.as_ctor().get_usize(i) + } + pub fn set_usize(&self, i: usize, val: usize) { + let l = self.__variant_layout(); + assert!(i < l.num_usize, "USize field {i} out of bounds (num_usize = {})", l.num_usize); + self.as_ctor().set_usize(i, val) + } + + pub fn get_num_64(&self, i: usize) -> u64 { + let l = self.__variant_layout(); + assert!(i < l.num_64, "64-bit field {i} out of bounds (num_64 = {})", l.num_64); + self.as_ctor().get_u64(l.offset_64(i)) + } + pub fn set_num_64(&self, i: usize, val: u64) { + let l = self.__variant_layout(); + assert!(i < l.num_64, "64-bit field {i} out of bounds (num_64 = {})", l.num_64); + self.as_ctor().set_u64(l.offset_64(i), val) + } + + pub fn get_num_32(&self, i: usize) -> u32 { + let l = self.__variant_layout(); + assert!(i < l.num_32, "32-bit field {i} out of bounds (num_32 = {})", l.num_32); + self.as_ctor().get_u32(l.offset_32(i)) + } + pub fn set_num_32(&self, i: usize, val: u32) { + let l = self.__variant_layout(); + assert!(i < l.num_32, "32-bit field {i} out of bounds 
(num_32 = {})", l.num_32); + self.as_ctor().set_u32(l.offset_32(i), val) + } + + pub fn get_num_16(&self, i: usize) -> u16 { + let l = self.__variant_layout(); + assert!(i < l.num_16, "16-bit field {i} out of bounds (num_16 = {})", l.num_16); + self.as_ctor().get_u16(l.offset_16(i)) + } + pub fn set_num_16(&self, i: usize, val: u16) { + let l = self.__variant_layout(); + assert!(i < l.num_16, "16-bit field {i} out of bounds (num_16 = {})", l.num_16); + self.as_ctor().set_u16(l.offset_16(i), val) + } + + pub fn get_num_8(&self, i: usize) -> u8 { + let l = self.__variant_layout(); + assert!(i < l.num_8, "8-bit field {i} out of bounds (num_8 = {})", l.num_8); + self.as_ctor().get_u8(l.offset_8(i)) + } + pub fn set_num_8(&self, i: usize, val: u8) { + let l = self.__variant_layout(); + assert!(i < l.num_8, "8-bit field {i} out of bounds (num_8 = {})", l.num_8); + self.as_ctor().set_u8(l.offset_8(i), val) + } + } + + impl $top<$crate::object::LeanOwned> { + /// Allocate a new constructor object for the given tag (variant). + /// + /// Panics if `tag` is out of range for this type's `LAYOUTS`. + pub fn alloc(tag: u8) -> Self { + let layouts = ::LAYOUTS; + let layout = layouts[tag as usize]; + Self::new( + $crate::object::LeanCtor::alloc(tag, layout.num_obj, layout.scalar_size()) + .into(), + ) + } + } + )+ + }; +} diff --git a/src/object.rs b/src/object.rs index ac4158f..1497534 100644 --- a/src/object.rs +++ b/src/object.rs @@ -36,6 +36,12 @@ use tags::*; /// Constructor tag for `IO.Error.userError`. const IO_ERROR_USER_ERROR_TAG: u8 = 7; +/// Convert a `usize` to `u32` for the Lean C API, panicking on overflow. 
+#[inline] +pub fn to_u32(val: usize) -> u32 { + u32::try_from(val).expect("value exceeds u32::MAX") +} + // ============================================================================= // LeanRef trait — shared interface for owned and borrowed references // ============================================================================= @@ -896,6 +902,109 @@ impl From> for LeanOwned { // [obj_0, obj_1, ..., obj_{n-1}] [usize_0, ...] [scalar bytes (descending size)] /// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). +/// +/// # Overall structure +/// +/// In C, a constructor is: +/// ```c +/// typedef struct { +/// lean_object m_header; // 8 bytes: tag in m_tag, obj count in m_other +/// lean_object * m_objs[]; // flexible array: data area starts here +/// } lean_ctor_object; +/// ``` +/// +/// `m_objs` is a C flexible array member — it has no compile-time size. +/// `lean_alloc_ctor(tag, num_objs, scalar_sz)` over-allocates so that the +/// region starting at `m_objs` is large enough for `num_objs` pointer-width +/// entries **plus** `scalar_sz` additional bytes for scalar fields. Despite +/// the `lean_object *` element type, the trailing bytes are not pointers — +/// the C API accesses them by casting to `uint8_t*` and indexing by byte +/// offset. `lean_ctor_obj_cptr` returns `&m_objs[0]`; all offsets in the +/// accessor API are relative to this address. +/// +/// # Constructor field kinds +/// +/// Lean constructor fields are either: +/// +/// - **Object fields**: `lean_object*` values — either a pointer to a heap +/// object (lowest bit 0) or a boxed immediate value (lowest bit 1). +/// +/// - **Scalar fields**: values stored inline as their unboxed C types +/// (`uint8_t`, `uint32_t`, `double`, etc.) rather than as `lean_object*`. +/// For example, `Bool` is stored as an unboxed `uint8_t` when it appears +/// directly in a `lean_ctor_object`, but +/// as a boxed `lean_object*` in polymorphic contexts like `Array Bool`. 
+/// +/// # Data area layout +/// +/// **Important:** Lean reorders fields by kind and size, so the memory layout +/// may differ from the declaration order in Lean source. The data area +/// (accessed via `lean_ctor_obj_cptr`) is laid out as three regions, following +/// the terminology from the +/// [Lean reference manual](https://lean-lang.org/doc/reference/latest/): +/// +/// 1. **Object fields** — pointer-width `lean_object*` entries, one per field +/// ("fields of the first kind") +/// 2. **`USize` fields** — pointer-width `usize` entries, one per field +/// ("fields of the second kind"); together with object fields these form +/// a contiguous array of pointer-width entries +/// 3. **Fixed-size scalar fields** — laid out in descending size order +/// (u64/f64, u32/f32, u16, u8/bool) +/// +/// For example, a Lean structure declared as: +/// ```text +/// structure Foo where +/// name : String -- object field +/// flag : Bool -- scalar (u8) +/// count : UInt64 -- scalar (u64) +/// size : USize -- usize field +/// ``` +/// is reordered and laid out as (assuming 64-bit pointers): +/// ```text +/// lean_ctor_obj_cptr ──> +/// byte 0..7: name (lean_object*) ← m_objs[0], object field +/// byte 8..15: size (usize) ← m_objs[1], USize field +/// byte 16..23: count (u64) ← scalar field +/// byte 24: flag (u8) ← scalar field +/// ``` +/// `lean_alloc_ctor(tag, num_objs=1, scalar_sz=9)` allocates enough space +/// for the header, 1 object pointer, and 9 scalar bytes (8 for the USize +/// entry + 8 for the u64 + 1 for the u8). The scalar fields live past the +/// end of the pointer-width entries; the C API accesses them by casting +/// `lean_ctor_obj_cptr` to `uint8_t*` and indexing by byte offset. 
+/// +/// # Offset conventions +/// +/// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), the +/// `LeanCtor` methods take `offset` as an **absolute byte offset** from +/// `lean_ctor_obj_cptr` — the same convention as the Lean C API functions +/// `lean_ctor_get_uint8`, `lean_ctor_set_uint32`, etc. +/// +/// For `usize`, the accessor indexes into the pointer-width entry array +/// described above. The index is relative to the first `USize` entry; +/// the object field count is read from the header and added internally, so +/// `get_usize(0)` reads the first `USize` field. +/// +/// Reading all fields of `Foo` in C vs. via [`lean_inductive!`](crate::lean_inductive) +/// (which computes byte offsets from declared field counts): +/// +/// ```c +/// // C — `lean_ctor_get`/`get_usize` take a pointer-sized field index; +/// // `lean_ctor_get_uint*` takes an absolute byte offset from `lean_ctor_obj_cptr`. +/// lean_object* name = lean_ctor_get(o, 0); // object field 0 +/// size_t size = lean_ctor_get_usize(o, 1); // USize field at index (num_objs + 0) +/// uint64_t count = lean_ctor_get_uint64(o, 16); // (1 obj + 1 usize) * 8 +/// uint8_t flag = lean_ctor_get_uint8(o, 24); // 16 + sizeof(uint64_t) +/// ``` +/// +/// ```ignore +/// // Rust — from a `lean_inductive! { LeanFoo [ { num_obj: 1, num_usize: 1, +/// // num_64: 1, num_8: 1 } ] }`: +/// let name = foo.get_obj(0); +/// let size = foo.get_usize(0); +/// let count = foo.get_num_64(0); +/// let flag = foo.get_num_8(0); +/// ``` #[repr(transparent)] pub struct LeanCtor(R); @@ -921,9 +1030,8 @@ impl LeanCtor { /// Get a borrowed reference to the `i`-th object field. 
pub fn get(&self, i: usize) -> LeanBorrowed<'_> { - #[allow(clippy::cast_possible_truncation)] LeanBorrowed( - unsafe { include::lean_ctor_get(self.0.as_raw(), i as u32) }, + unsafe { include::lean_ctor_get(self.0.as_raw(), to_u32(i)) }, PhantomData, ) } @@ -937,56 +1045,97 @@ impl LeanCtor { // ------------------------------------------------------------------------- // Scalar field readers // ------------------------------------------------------------------------- - // - // `num_objs` is the number of non-scalar fields (object + usize) preceding - // the scalar area. `offset` is a byte offset within the scalar area. - // For `get_usize`, `slot` is a slot index (not byte offset). - /// Compute the absolute byte offset for a scalar field. - #[allow(clippy::cast_possible_truncation)] + /// Number of object fields, read from the constructor header. #[inline] - fn scalar_offset(num_objs: usize, offset: usize) -> u32 { - (num_objs * 8 + offset) as u32 + pub fn num_objs(&self) -> usize { + unsafe { include::lean_ctor_num_objs(self.0.as_raw()) as usize } + } + + /// All scalar accessors below take `offset` as an absolute byte offset + /// from `lean_ctor_obj_cptr`, matching the Lean C API convention for + /// `lean_ctor_get_uint8`, `lean_ctor_get_uint32`, etc. 
+ pub fn get_u8(&self, offset: usize) -> u8 { + unsafe { include::lean_ctor_get_uint8(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_u16(&self, offset: usize) -> u16 { + unsafe { include::lean_ctor_get_uint16(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_u32(&self, offset: usize) -> u32 { + unsafe { include::lean_ctor_get_uint32(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_u64(&self, offset: usize) -> u64 { + unsafe { include::lean_ctor_get_uint64(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_f64(&self, offset: usize) -> f64 { + unsafe { include::lean_ctor_get_float(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_f32(&self, offset: usize) -> f32 { + unsafe { include::lean_ctor_get_float32(self.0.as_raw(), to_u32(offset)) } + } + /// Read a `USize` field. `USize` fields occupy pointer-width entries in + /// the data area right after object fields. `index` is relative to + /// the first `USize` entry; the object field count is read from the header + /// and added internally. + pub fn get_usize(&self, index: usize) -> usize { + unsafe { include::lean_ctor_get_usize(self.0.as_raw(), to_u32(self.num_objs() + index)) } + } + /// Read a single `Bool` scalar field (`uint8_t`). + /// Returns `true` if the byte is non-zero. + pub fn get_bool(&self, offset: usize) -> bool { + self.get_u8(offset) != 0 } - pub fn get_u8(&self, num_objs: usize, offset: usize) -> u8 { + // ------------------------------------------------------------------------- + // Scalar field setters + // ------------------------------------------------------------------------- + // + // All setters take `offset` as an absolute byte offset from + // `lean_ctor_obj_cptr`, matching the Lean C API convention. + // Available on all R: LeanRef (not restricted to LeanOwned). 
+ + pub fn set_u8(&self, offset: usize, val: u8) { unsafe { - include::lean_ctor_get_uint8(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); } } - pub fn get_u16(&self, num_objs: usize, offset: usize) -> u16 { + pub fn set_u16(&self, offset: usize, val: u16) { unsafe { - include::lean_ctor_get_uint16(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); } } - pub fn get_u32(&self, num_objs: usize, offset: usize) -> u32 { + pub fn set_u32(&self, offset: usize, val: u32) { unsafe { - include::lean_ctor_get_uint32(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); } } - pub fn get_u64(&self, num_objs: usize, offset: usize) -> u64 { + pub fn set_u64(&self, offset: usize, val: u64) { unsafe { - include::lean_ctor_get_uint64(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); } } - pub fn get_f64(&self, num_objs: usize, offset: usize) -> f64 { + pub fn set_f64(&self, offset: usize, val: f64) { unsafe { - include::lean_ctor_get_float(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); } } - pub fn get_f32(&self, num_objs: usize, offset: usize) -> f32 { + pub fn set_f32(&self, offset: usize, val: f32) { unsafe { - include::lean_ctor_get_float32(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) + include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); } } - /// Read a `usize` at slot `slot` past `num_objs` object fields. - /// Uses a **slot index** (not byte offset). - #[allow(clippy::cast_possible_truncation)] - pub fn get_usize(&self, num_objs: usize, slot: usize) -> usize { - unsafe { include::lean_ctor_get_usize(self.0.as_raw(), (num_objs + slot) as u32) } + /// Set a `USize` field. 
`USize` fields occupy pointer-width entries in + /// the data area right after object fields. `index` is relative to + /// the first `USize` entry; the object field count is read from the header + /// and added internally. + pub fn set_usize(&self, index: usize, val: usize) { + unsafe { + include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); + } } - pub fn get_bool(&self, num_objs: usize, offset: usize) -> bool { - self.get_u8(num_objs, offset) != 0 + /// Write a single `Bool` scalar field (`uint8_t`, 0 or 1). + pub fn set_bool(&self, offset: usize, val: bool) { + self.set_u8(offset, val as u8); } } @@ -1003,18 +1152,17 @@ impl LeanCtor { /// Allocate a new constructor object. pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { - #[allow(clippy::cast_possible_truncation)] - let obj = - unsafe { include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) }; + let obj = unsafe { + include::lean_alloc_ctor(u32::from(tag), to_u32(num_objs), to_u32(scalar_size)) + }; Self(LeanOwned(obj)) } /// Set the `i`-th object field. Takes ownership of `val`. 
pub fn set(&self, i: usize, val: impl Into) { let val: LeanOwned = val.into(); - #[allow(clippy::cast_possible_truncation)] unsafe { - include::lean_ctor_set(self.0.as_raw(), i as u32, val.into_raw()); + include::lean_ctor_set(self.0.as_raw(), to_u32(i), val.into_raw()); } } @@ -1026,76 +1174,6 @@ impl LeanCtor { std::mem::forget(self); ptr } - - // ------------------------------------------------------------------------- - // Scalar field setters (owned only — mutation) - // ------------------------------------------------------------------------- - - pub fn set_u8(&self, num_objs: usize, offset: usize, val: u8) { - unsafe { - include::lean_ctor_set_uint8( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - pub fn set_u16(&self, num_objs: usize, offset: usize, val: u16) { - unsafe { - include::lean_ctor_set_uint16( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - pub fn set_u32(&self, num_objs: usize, offset: usize, val: u32) { - unsafe { - include::lean_ctor_set_uint32( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - pub fn set_u64(&self, num_objs: usize, offset: usize, val: u64) { - unsafe { - include::lean_ctor_set_uint64( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - pub fn set_f64(&self, num_objs: usize, offset: usize, val: f64) { - unsafe { - include::lean_ctor_set_float( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - pub fn set_f32(&self, num_objs: usize, offset: usize, val: f32) { - unsafe { - include::lean_ctor_set_float32( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); - } - } - /// Set a `usize` at slot `slot` past `num_objs` object fields. - /// Uses a **slot index** (not byte offset). 
- #[allow(clippy::cast_possible_truncation)] - pub fn set_usize(&self, num_objs: usize, slot: usize, val: usize) { - unsafe { - include::lean_ctor_set_usize(self.0.as_raw(), (num_objs + slot) as u32, val); - } - } - pub fn set_bool(&self, num_objs: usize, offset: usize, val: bool) { - self.set_u8(num_objs, offset, val as u8); - } } impl From> for LeanOwned { @@ -1108,6 +1186,86 @@ impl From> for LeanOwned { } } +// ============================================================================= +// LeanCtorLayout — layout metadata for structures and inductive variants +// ============================================================================= + +/// Memory layout of a single Lean constructor (one `structure` or one variant +/// of an `inductive`): counts of object, `USize`, and 64/32/16/8-bit scalar +/// fields. +/// +/// Fields are laid out in the order: object → `USize` → descending scalar size +/// (8B → 4B → 2B → 1B). Within a size, Lean preserves declaration order. +/// +/// The tag is not stored here — it's the index of this layout in the owning +/// type's [`LeanCtorLayout::LAYOUTS`] slice. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct SingleCtorLayout { + pub num_obj: usize, + pub num_usize: usize, + pub num_64: usize, + pub num_32: usize, + pub num_16: usize, + pub num_8: usize, +} + +impl SingleCtorLayout { + pub const ZERO: Self = Self { + num_obj: 0, + num_usize: 0, + num_64: 0, + num_32: 0, + num_16: 0, + num_8: 0, + }; + + /// `scalar_sz` argument for `lean_alloc_ctor`: total bytes needed for all + /// fixed-size scalar fields after the object and `USize` fields. + #[inline] + pub const fn scalar_size(&self) -> usize { + self.num_usize * size_of::() + + self.num_64 * 8 + + self.num_32 * 4 + + self.num_16 * 2 + + self.num_8 + } + + /// Byte offset from `lean_ctor_obj_cptr` to the first fixed-size scalar + /// field, skipping the preceding object and `USize` fields. 
+ #[inline] + pub const fn scalar_base(&self) -> usize { + (self.num_obj + self.num_usize) * size_of::() + } + + #[inline] + pub const fn offset_64(&self, i: usize) -> usize { + self.scalar_base() + i * 8 + } + #[inline] + pub const fn offset_32(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + i * 4 + } + #[inline] + pub const fn offset_16(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + self.num_32 * 4 + i * 2 + } + #[inline] + pub const fn offset_8(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + self.num_32 * 4 + self.num_16 * 2 + i + } +} + +/// Per-constructor layouts for a Lean `structure` or `inductive`. +/// +/// `LAYOUTS[tag]` is the [`SingleCtorLayout`] for the constructor with that +/// Lean tag. A `structure` yields a 1-element slice (tag 0); an `inductive` +/// yields one entry per variant in declaration order. +/// +/// Implemented automatically by [`lean_inductive!`](crate::lean_inductive). +pub trait LeanCtorLayout { + const LAYOUTS: &'static [SingleCtorLayout]; +} + // ============================================================================= // LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) // ============================================================================= @@ -1912,3 +2070,109 @@ impl LeanRef for LeanShared { self.0.as_raw() } } + +// ============================================================================= +// Unit tests for layout math (no Lean runtime needed) +// ============================================================================= + +#[cfg(test)] +mod layout_tests { + use super::*; + + #[test] + fn zero_layout_has_zero_size() { + let z = SingleCtorLayout::ZERO; + assert_eq!(z.num_obj, 0); + assert_eq!(z.scalar_size(), 0); + assert_eq!(z.scalar_base(), 0); + } + + #[test] + fn scalar_size_sums_all_scalar_sizes() { + let l = SingleCtorLayout { + num_obj: 0, + num_usize: 2, + num_64: 3, + num_32: 4, + num_16: 5, + num_8: 7, + }; + let expected = 2 * 
size_of::() + 3 * 8 + 4 * 4 + 5 * 2 + 7; + assert_eq!(l.scalar_size(), expected); + } + + #[test] + fn scalar_base_skips_obj_and_usize() { + let l = SingleCtorLayout { + num_obj: 3, + num_usize: 2, + ..SingleCtorLayout::ZERO + }; + // 3 object fields + 2 USize fields, all pointer-width + assert_eq!(l.scalar_base(), 5 * size_of::()); + } + + #[test] + fn offsets_stack_by_descending_size() { + let l = SingleCtorLayout { + num_obj: 1, + num_usize: 1, + num_64: 2, + num_32: 2, + num_16: 1, + num_8: 3, + }; + let base = l.scalar_base(); + // 8-byte scalars start at `base`. + assert_eq!(l.offset_64(0), base); + assert_eq!(l.offset_64(1), base + 8); + // 4-byte scalars start after 2 u64s. + assert_eq!(l.offset_32(0), base + 16); + assert_eq!(l.offset_32(1), base + 16 + 4); + // 2-byte scalars start after 2 u64s + 2 u32s. + assert_eq!(l.offset_16(0), base + 16 + 8); + // 1-byte scalars start after 2 u64s + 2 u32s + 1 u16. + assert_eq!(l.offset_8(0), base + 16 + 8 + 2); + assert_eq!(l.offset_8(2), base + 16 + 8 + 2 + 2); + } + + #[test] + fn offsets_skip_absent_scalar_sizes() { + // u64 + u8, no u32/u16 fields. + let l = SingleCtorLayout { + num_64: 1, + num_8: 1, + ..SingleCtorLayout::ZERO + }; + assert_eq!(l.offset_64(0), 0); + // u8 sits immediately after the u64: no u32/u16 to skip. + assert_eq!(l.offset_8(0), 8); + assert_eq!(l.scalar_size(), 8 + 1); + } + + #[test] + fn layout_slice_is_usable_in_const_context() { + // Confirms SingleCtorLayout can be read through LeanCtorLayout in const + // code — this is what the generated accessor methods rely on. 
+ struct Marker; + impl LeanCtorLayout for Marker { + const LAYOUTS: &'static [SingleCtorLayout] = &[ + SingleCtorLayout { + num_obj: 1, + ..SingleCtorLayout::ZERO + }, + SingleCtorLayout { + num_64: 2, + ..SingleCtorLayout::ZERO + }, + ]; + } + const L0: SingleCtorLayout = <Marker as LeanCtorLayout>::LAYOUTS[0]; + const L1: SingleCtorLayout = <Marker as LeanCtorLayout>::LAYOUTS[1]; + const OFF: usize = L1.offset_64(1); + + assert_eq!(L0.num_obj, 1); + assert_eq!(L1.num_64, 2); + assert_eq!(OFF, 8); + } +} diff --git a/src/test_ffi.rs b/src/test_ffi.rs index f1c86e9..8165d8f 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -22,18 +22,61 @@ use crate::object::{ // ============================================================================= crate::lean_domain_type! { + /// Lean `RustData` — opaque external object + LeanRustData; +} + +crate::lean_inductive! { /// Lean `Point` — structure Point where x : Nat; y : Nat - LeanPoint; - /// Lean `NatTree` — inductive NatTree | leaf : Nat → NatTree | node : NatTree → NatTree → NatTree - LeanNatTree; + LeanPoint [ { num_obj: 2 } ]; + + /// Lean `NatTree` — | leaf (n : Nat) | node (l r : NatTree) + /// Recursive inductive: the `node` variant's object fields point to more + /// `NatTree` values, so this exercises nested-inductive access.
+ LeanNatTree [ + { num_obj: 1 }, // tag 0 — leaf (1 Nat) + { num_obj: 2 }, // tag 1 — node (2 NatTree children) + ]; + /// Lean `ScalarStruct` — structure ScalarStruct where obj : Nat; u8val : UInt8; u32val : UInt32; u64val : UInt64 - LeanScalarStruct; + LeanScalarStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ]; + /// Lean `ExtScalarStruct` — all scalar types - LeanExtScalarStruct; + LeanExtScalarStruct [ { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } ]; + /// Lean `USizeStruct` — structure USizeStruct where obj : Nat; uval : USize; u8val : UInt8 - LeanUSizeStruct; - /// Lean `RustData` — opaque external object - LeanRustData; + LeanUSizeStruct [ { num_obj: 1, num_usize: 1, num_8: 1 } ]; + + /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool + LeanUSizeMixedStruct [ { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } ]; + + /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool + LeanBoolStruct [ { num_obj: 1, num_8: 3 } ]; + + /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 + LeanMultiU32Struct [ { num_obj: 1, num_32: 3 } ]; + + /// Lean `Outer` — structure containing ScalarStruct + String + UInt64 + LeanOuter [ { num_obj: 2, num_64: 1 } ]; + + /// Lean `InductiveHolder` — structure containing TestInductive + UInt32 + LeanInductiveHolder [ { num_obj: 1, num_32: 1 } ]; + + /// Lean `TestInductive` — | empty | withScalars (a b : UInt64) | withMixed (obj : Nat) (x : UInt32) (flag : Bool) + /// Tag 0 is scalar-unboxed by Lean; tags 1–2 are ctor objects. + LeanTestInductive [ + { }, // tag 0 — empty + { num_64: 2 }, // tag 1 — withScalars (2 u64) + { num_obj: 1, num_32: 1, num_8: 1 }, // tag 2 — withMixed (1 obj + 1 u32 + 1 bool) + ]; + + /// Lean `StructInVariant` — variants carry structure-typed fields. 
+ /// | empty | withPoint (p : Point) | withScalar (s : ScalarStruct) (extra : UInt32) + LeanStructInVariant [ + { }, // tag 0 — empty + { num_obj: 1 }, // tag 1 — withPoint (1 obj: Point) + { num_obj: 1, num_32: 1 }, // tag 2 — withScalar (1 obj: ScalarStruct, 1 u32) + ]; } /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). @@ -126,13 +169,12 @@ pub(crate) extern "C" fn rs_roundtrip_option_nat( pub(crate) extern "C" fn rs_roundtrip_point( point_ptr: LeanPoint>, ) -> LeanPoint { - let ctor = point_ptr.as_ctor(); - let x = Nat::from_obj(&ctor.get(0)); - let y = Nat::from_obj(&ctor.get(1)); - let out = LeanCtor::alloc(0, 2, 0); - out.set(0, build_nat(&x)); - out.set(1, build_nat(&y)); - LeanPoint::new(out.into()) + let x = Nat::from_obj(&point_ptr.get_obj(0)); + let y = Nat::from_obj(&point_ptr.get_obj(1)); + let out = LeanPoint::alloc(0); + out.set_obj(0, build_nat(&x)); + out.set_obj(1, build_nat(&y)); + out } /// Round-trip a NatTree (inductive: leaf Nat | node NatTree NatTree). 
@@ -140,28 +182,28 @@ pub(crate) extern "C" fn rs_roundtrip_point( pub(crate) extern "C" fn rs_roundtrip_nat_tree( tree_ptr: LeanNatTree>, ) -> LeanNatTree { - LeanNatTree::new(roundtrip_nat_tree_recursive(&tree_ptr.as_ctor())) + roundtrip_nat_tree(&tree_ptr) } -fn roundtrip_nat_tree_recursive(ctor: &LeanCtor) -> LeanOwned { - match ctor.tag() { +fn roundtrip_nat_tree(tree: &LeanNatTree) -> LeanNatTree { + match tree.as_ctor().tag() { 0 => { // leaf : Nat → NatTree - let nat = Nat::from_obj(&ctor.get(0)); - let leaf = LeanCtor::alloc(0, 1, 0); - leaf.set(0, build_nat(&nat)); - leaf.into() + let nat = Nat::from_obj(&tree.get_obj(0)); + let out = LeanNatTree::alloc(0); + out.set_obj(0, build_nat(&nat)); + out } 1 => { - // node : NatTree → NatTree → NatTree - let left = roundtrip_nat_tree_recursive(&ctor.get(0).as_ctor()); - let right = roundtrip_nat_tree_recursive(&ctor.get(1).as_ctor()); - let node = LeanCtor::alloc(1, 2, 0); - node.set(0, left); - node.set(1, right); - node.into() + // node : NatTree → NatTree → NatTree (recurse into children) + let left = roundtrip_nat_tree(&LeanNatTree::from_ctor(tree.get_obj(0).as_ctor())); + let right = roundtrip_nat_tree(&LeanNatTree::from_ctor(tree.get_obj(1).as_ctor())); + let out = LeanNatTree::alloc(1); + out.set_obj(0, left); + out.set_obj(1, right); + out } - _ => panic!("Invalid NatTree tag: {}", ctor.tag()), + tag => panic!("Invalid NatTree tag: {tag}"), } } @@ -233,25 +275,24 @@ pub(crate) extern "C" fn rs_io_result_error_string( // LeanCtor scalar fields // ============================================================================= -/// Round-trip a ScalarStruct. -/// Lean layout: 1 obj field, then scalars by descending size: u64(0), u32(8), u8(12). +/// Round-trip a ScalarStruct via generated accessors. +/// Lean layout: 1 obj field, then scalars by descending size: u64, u32, u8. /// Total scalar size: 8 + 4 + 1 = 13 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_scalar_struct( ptr: LeanScalarStruct>, ) -> LeanScalarStruct { - let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let u64val = ctor.get_u64(1, 0); - let u32val = ctor.get_u32(1, 8); - let u8val = ctor.get_u8(1, 12); + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0); + let u8val = ptr.get_num_8(0); - let out = LeanCtor::alloc(0, 1, 13); - out.set(0, build_nat(&obj_nat)); - out.set_u64(1, 0, u64val); - out.set_u32(1, 8, u32val); - out.set_u8(1, 12, u8val); - LeanScalarStruct::new(out.into()) + let out = LeanScalarStruct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_64(0, u64val); + out.set_num_32(0, u32val); + out.set_num_8(0, u8val); + out } // ============================================================================= @@ -360,54 +401,279 @@ pub(crate) extern "C" fn rs_external_get_label( // Extended scalar struct roundtrip (u8, u16, u32, u64, f64, f32) // ============================================================================= -/// Round-trip an ExtScalarStruct. -/// Lean layout: 1 obj, then descending size: u64(0), f64(8), u32(16), f32(20), u16(24), u8(26). +/// Round-trip an ExtScalarStruct via generated accessors. +/// Lean layout: 1 obj, then descending size: u64, f64, u32, f32, u16, u8. /// Total scalar: 27 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ptr: LeanExtScalarStruct>, ) -> LeanExtScalarStruct { - let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let u64val = ctor.get_u64(1, 0); - let fval = ctor.get_f64(1, 8); - let u32val = ctor.get_u32(1, 16); - let f32val = ctor.get_f32(1, 20); - let u16val = ctor.get_u16(1, 24); - let u8val = ctor.get_u8(1, 26); - - let out = LeanCtor::alloc(0, 1, 27); - out.set(0, build_nat(&obj_nat)); - out.set_u64(1, 0, u64val); - out.set_f64(1, 8, fval); - out.set_u32(1, 16, u32val); - out.set_f32(1, 20, f32val); - out.set_u16(1, 24, u16val); - out.set_u8(1, 26, u8val); - LeanExtScalarStruct::new(out.into()) + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + // 8-byte scalars: u64 at index 0, f64 at index 1 + let u64val = ptr.get_num_64(0); + let fval = f64::from_bits(ptr.get_num_64(1)); + // 4-byte scalars: u32 at index 0, f32 at index 1 + let u32val = ptr.get_num_32(0); + let f32val = f32::from_bits(ptr.get_num_32(1)); + let u16val = ptr.get_num_16(0); + let u8val = ptr.get_num_8(0); + + let out = LeanExtScalarStruct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_64(0, u64val); + out.set_num_64(1, fval.to_bits()); + out.set_num_32(0, u32val); + out.set_num_32(1, f32val.to_bits()); + out.set_num_16(0, u16val); + out.set_num_8(0, u8val); + out } // ============================================================================= // USize struct roundtrip // ============================================================================= -/// Round-trip a USizeStruct. -/// Lean layout: 1 obj field, then usize (slot 0), then u8 at scalar offset 0. -/// Alloc: num_objs=1, scalar_sz=9 (8 for usize slot + 1 for u8). +/// Round-trip a USizeStruct via generated accessors. +/// Lean layout: 1 obj field, 1 usize field, then u8. +/// Alloc: num_objs=1, scalar_sz=9 (8 for usize + 1 for u8). 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_usize_struct( ptr: LeanUSizeStruct>, ) -> LeanUSizeStruct { + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let uval = ptr.get_usize(0); + let u8val = ptr.get_num_8(0); + + let out = LeanUSizeStruct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_usize(0, uval); + out.set_num_8(0, u8val); + out +} + +/// Round-trip a USizeMixedStruct via generated accessors. +/// Lean layout: 1 obj field, 1 usize field, then scalars: u64, u32, bool. +/// Total scalar size: 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21 bytes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( + ptr: LeanUSizeMixedStruct>, +) -> LeanUSizeMixedStruct { + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let uval = ptr.get_usize(0); + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0); + let bval = ptr.get_num_8(0) != 0; + + let out = LeanUSizeMixedStruct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_usize(0, uval); + out.set_num_64(0, u64val); + out.set_num_32(0, u32val); + out.set_num_8(0, bval as u8); + out +} + +// ============================================================================= +// BoolStruct / MultiU32Struct roundtrips (batch scalar access) +// ============================================================================= + +/// Round-trip a BoolStruct via generated accessors. +/// Lean layout: 1 obj field, then 3 Bool scalars. +/// Total scalar size: 3 bytes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_bool_struct( + ptr: LeanBoolStruct>, +) -> LeanBoolStruct { + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let b1 = ptr.get_num_8(0); + let b2 = ptr.get_num_8(1); + let b3 = ptr.get_num_8(2); + + let out = LeanBoolStruct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_8(0, b1); + out.set_num_8(1, b2); + out.set_num_8(2, b3); + out +} + +/// Round-trip a MultiU32Struct via generated accessors. 
+/// Lean layout: 1 obj field, then 3 UInt32 scalars. +/// Total scalar size: 12 bytes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( + ptr: LeanMultiU32Struct>, +) -> LeanMultiU32Struct { + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let a = ptr.get_num_32(0); + let b = ptr.get_num_32(1); + let c = ptr.get_num_32(2); + + let out = LeanMultiU32Struct::alloc(0); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_32(0, a); + out.set_num_32(1, b); + out.set_num_32(2, c); + out +} + +// ============================================================================= +// TestInductive roundtrip (multi-variant with non-zero tags) +// ============================================================================= + +/// Round-trip a TestInductive. +/// - tag 0 (empty): no fields, scalar representation +/// - tag 1 (withScalars): 2 u64 fields +/// - tag 2 (withMixed): 1 obj + 1 u32 + 1 bool +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_test_inductive( + ptr: LeanTestInductive>, +) -> LeanTestInductive { + // Tag 0 (empty) has no fields — Lean represents as a scalar + if ptr.inner().is_scalar() { + return LeanTestInductive::new(ptr.inner().to_owned_ref()); + } + + let tag = ptr.as_ctor().tag(); + match tag { + 1 => { + // withScalars: 2 u64 fields + let a = ptr.get_num_64(0); + let b = ptr.get_num_64(1); + + let dst = LeanTestInductive::alloc(1); + dst.set_num_64(0, a); + dst.set_num_64(1, b); + dst + } + 2 => { + // withMixed: 1 obj + 1 u32 + 1 bool + let obj_nat = Nat::from_obj(&ptr.get_obj(0)); + let x = ptr.get_num_32(0); + let flag = ptr.get_num_8(0); + + let dst = LeanTestInductive::alloc(2); + dst.set_obj(0, build_nat(&obj_nat)); + dst.set_num_32(0, x); + dst.set_num_8(0, flag); + dst + } + _ => unreachable!("Invalid TestInductive tag: {tag}"), + } +} + +// ============================================================================= +// Nested structure roundtrip +// 
============================================================================= + +/// Round-trip an Outer (structure containing ScalarStruct + String + UInt64). +/// Lean layout: 2 obj fields (inner : ScalarStruct, label : String), 1 u64 scalar. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_outer( + ptr: LeanOuter>, +) -> LeanOuter { + // Read: extract nested structure and scalar fields let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let uval = ctor.get_usize(1, 0); - let u8val = ctor.get_u8(2, 0); + let inner_ref = ctor.get(0); // ScalarStruct (object field 0) + let label_ref = ctor.get(1); // String (object field 1) + let count = ptr.get_num_64(0); + + // Roundtrip the inner ScalarStruct through its own trait + let inner_wrapped = LeanScalarStruct(inner_ref); + let inner_u64 = inner_wrapped.get_num_64(0); + let inner_u32 = inner_wrapped.get_num_32(0); + let inner_u8 = inner_wrapped.get_num_8(0); + let inner_obj = Nat::from_obj(&inner_wrapped.as_ctor().get(0)); + + let new_inner = LeanScalarStruct::alloc(0); + new_inner.set_obj(0, build_nat(&inner_obj)); + new_inner.set_num_64(0, inner_u64); + new_inner.set_num_32(0, inner_u32); + new_inner.set_num_8(0, inner_u8); + + // Roundtrip the label string + let new_label = LeanString::new(&label_ref.as_string().to_string()); + + // Write: construct new Outer + let out = LeanOuter::alloc(0); + out.set_obj(0, new_inner); + out.set_obj(1, new_label); + out.set_num_64(0, count); + out +} + +/// Round-trip an InductiveHolder (structure containing TestInductive + UInt32). 
+#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_inductive_holder( + ptr: LeanInductiveHolder>, +) -> LeanInductiveHolder { + // Read the inductive value (object field 0) and scalar + let ctor = ptr.as_ctor(); + let value_ref = ctor.get(0); + let tag_copy = ptr.get_num_32(0); + + // Roundtrip the inner TestInductive + let new_value = rs_roundtrip_test_inductive(LeanTestInductive(value_ref)); + + // Write + let out = LeanInductiveHolder::alloc(0); + out.set_obj(0, new_value); + out.set_num_32(0, tag_copy); + out +} + +/// Round-trip a StructInVariant — an inductive whose variants hold structures. +/// +/// Exercises variant-dispatched access where the variant's payload is itself +/// a ctor-backed Lean structure (`Point` / `ScalarStruct`), re-using those +/// structures' own generated accessors for the inner reads and writes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_struct_in_variant( + ptr: LeanStructInVariant>, +) -> LeanStructInVariant { + // Tag 0 (empty) — scalar-unboxed by Lean. + if ptr.inner().is_scalar() { + return LeanStructInVariant::new(ptr.inner().to_owned_ref()); + } - let out = LeanCtor::alloc(0, 1, 9); - out.set(0, build_nat(&obj_nat)); - out.set_usize(1, 0, uval); - out.set_u8(2, 0, u8val); - LeanUSizeStruct::new(out.into()) + let tag = ptr.as_ctor().tag(); + match tag { + 1 => { + // withPoint: clone the Point out and rebuild it via LeanPoint's accessors. + let src_pt = LeanPoint::from_ctor(ptr.get_obj(0).as_ctor()); + let x = Nat::from_obj(&src_pt.get_obj(0)); + let y = Nat::from_obj(&src_pt.get_obj(1)); + + let new_pt = LeanPoint::alloc(0); + new_pt.set_obj(0, build_nat(&x)); + new_pt.set_obj(1, build_nat(&y)); + + let dst = LeanStructInVariant::alloc(1); + dst.set_obj(0, new_pt); + dst + } + 2 => { + // withScalar: clone the ScalarStruct out + copy the trailing u32. 
+ let src_ss = LeanScalarStruct::from_ctor(ptr.get_obj(0).as_ctor()); + let obj_nat = Nat::from_obj(&src_ss.get_obj(0)); + let u64val = src_ss.get_num_64(0); + let u32val = src_ss.get_num_32(0); + let u8val = src_ss.get_num_8(0); + let extra = ptr.get_num_32(0); + + let new_ss = LeanScalarStruct::alloc(0); + new_ss.set_obj(0, build_nat(&obj_nat)); + new_ss.set_num_64(0, u64val); + new_ss.set_num_32(0, u32val); + new_ss.set_num_8(0, u8val); + + let dst = LeanStructInVariant::alloc(2); + dst.set_obj(0, new_ss); + dst.set_num_32(0, extra); + dst + } + _ => unreachable!("Invalid StructInVariant tag: {tag}"), + } } // ============================================================================= @@ -643,20 +909,12 @@ pub(crate) extern "C" fn rs_owned_prod_multiply(pair: LeanProd) -> Le Nat(fst.0 * snd.0).to_lean() } -/// Owned ScalarStruct: sum all scalar fields. -/// ScalarStruct { obj : Nat, u8val : UInt8, u32val : UInt32, u64val : UInt64 } -/// Lean reorders scalar fields by descending size: -/// u64val at scalar offset 0, u32val at offset 8, u8val at offset 12 -/// Note: roundtrip tests use declaration-order offsets (0, 1, 5) which happen -/// to roundtrip correctly because both read and write use the same offsets. -/// But for computing actual values, we must use the real layout. +/// Owned ScalarStruct: sum all scalar fields via generated accessors. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_owned_scalar_sum(ptr: LeanScalarStruct<LeanOwned>) -> u64 { - // Lean descending-size layout: u64(0), u32(8), u8(12) - let ctor = ptr.as_ctor(); - let u64val = ctor.get_u64(1, 0); - let u32val = ctor.get_u32(1, 8) as u64; - let u8val = ctor.get_u8(1, 12) as u64; + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0) as u64; + let u8val = ptr.get_num_8(0) as u64; u64val + u32val + u8val // ptr drops here → lean_dec } @@ -1569,3 +1827,105 @@ pub(crate) extern "C" fn rs_shared_persistent_nat( ); first.to_lean() } + +// ============================================================================= +// Unit tests for generated LAYOUTS (no Lean runtime needed) +// ============================================================================= + +#[cfg(test)] +mod layout_sanity { + use crate::object::{LeanCtorLayout, LeanOwned, SingleCtorLayout}; + + type Scalar = super::LeanScalarStruct<LeanOwned>; + type Ext = super::LeanExtScalarStruct<LeanOwned>; + type USize_ = super::LeanUSizeStruct<LeanOwned>; + type UMixed = super::LeanUSizeMixedStruct<LeanOwned>; + type Bools = super::LeanBoolStruct<LeanOwned>; + type MultiU32 = super::LeanMultiU32Struct<LeanOwned>; + type Outer = super::LeanOuter<LeanOwned>; + type Holder = super::LeanInductiveHolder<LeanOwned>; + type Ind = super::LeanTestInductive<LeanOwned>; + + /// The single layout for a structure (1-variant type).
+ fn struct_layout<L: LeanCtorLayout>() -> SingleCtorLayout { + let layouts = <L as LeanCtorLayout>::LAYOUTS; + assert_eq!(layouts.len(), 1, "expected a 1-variant (structure) type"); + layouts[0] + } + + #[test] + fn scalar_struct_layout() { + let l = struct_layout::<Scalar>(); + assert_eq!(l.num_obj, 1); + assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (1, 1, 0, 1)); + assert_eq!(l.scalar_size(), 8 + 4 + 1); + } + + #[test] + fn ext_scalar_struct_layout() { + let l = struct_layout::<Ext>(); + assert_eq!(l.num_obj, 1); + assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (2, 2, 1, 1)); + assert_eq!(l.offset_32(0), l.scalar_base() + 16); + assert_eq!(l.offset_8(0), l.scalar_base() + 16 + 8 + 2); + } + + #[test] + fn usize_struct_layout() { + let l = struct_layout::<USize_>(); + assert_eq!(l.num_usize, 1); + assert_eq!(l.num_8, 1); + // USize sits between obj fields and fixed-width scalars. + assert_eq!(l.scalar_base(), (1 + 1) * size_of::<usize>()); + } + + #[test] + fn usize_mixed_struct_layout() { + let l = struct_layout::<UMixed>(); + assert_eq!( + (l.num_obj, l.num_usize, l.num_64, l.num_32, l.num_8), + (1, 1, 1, 1, 1) + ); + } + + #[test] + fn bool_struct_layout() { + let l = struct_layout::<Bools>(); + assert_eq!(l.num_8, 3); + assert_eq!(l.offset_8(2), l.scalar_base() + 2); + } + + #[test] + fn multi_u32_struct_layout() { + let l = struct_layout::<MultiU32>(); + assert_eq!(l.num_32, 3); + assert_eq!(l.offset_32(2), l.scalar_base() + 8); + } + + #[test] + fn outer_layout() { + let l = struct_layout::<Outer>(); + assert_eq!((l.num_obj, l.num_64), (2, 1)); + } + + #[test] + fn inductive_holder_layout() { + let l = struct_layout::<Holder>(); + assert_eq!((l.num_obj, l.num_32), (1, 1)); + } + + #[test] + fn inductive_variant_layouts() { + let layouts = <Ind as LeanCtorLayout>::LAYOUTS; + assert_eq!(layouts.len(), 3); + // tag 0: empty + assert_eq!(layouts[0], SingleCtorLayout::ZERO); + // tag 1: withScalars (2 u64) + assert_eq!(layouts[1].num_64, 2); + // tag 2: withMixed (1 obj + 1 u32 + 1 bool) + assert_eq!( + (layouts[2].num_obj, layouts[2].num_32, layouts[2].num_8), + (1, 1, 1) + );
+ } +}